diff --git a/cleanroom/datasets/preprocessor_time.py b/cleanroom/datasets/preprocessor_time.py new file mode 100644 index 0000000000000000000000000000000000000000..031324af56bbd6b7ccc4e9844c88b2bd16fec164 --- /dev/null +++ b/cleanroom/datasets/preprocessor_time.py @@ -0,0 +1,348 @@ + +import pandas as pd +import torch +import h5py +import json +import os +from typing import Dict, Set, List, Tuple +from collections import defaultdict +import re +from io import StringIO + +# --- Helper Functions --- +
def load_boundary_nodes_from_csvs(boundary_csv_folder_path: str, target_column_name: str) -> Dict[str, Set[int]]: + # This function appears correct and has been kept as is. + # ... (function content is unchanged) ... + boundary_nodes_map: Dict[str, Set[int]] = {} + all_seen_nodes: Set[int] = set() + walls_nodes: Set[int] = set() + everything_csv_path = None + + print(f"Scanning for boundary CSV files in: {boundary_csv_folder_path}") + + if not os.path.isdir(boundary_csv_folder_path): + print(f"Error: Folder not found at {boundary_csv_folder_path}") + return boundary_nodes_map + + # Step 1: Process 'walls.csv' first + walls_file_path = os.path.join(boundary_csv_folder_path, "walls.csv") + if os.path.exists(walls_file_path): + print(f"\nProcessing 'walls.csv'...") + try: + df_walls = pd.read_csv(walls_file_path, skiprows=5, header=0, engine='python') + if target_column_name in df_walls.columns: + walls_nodes = set(pd.to_numeric(df_walls[target_column_name], errors='coerce').dropna().astype(int).tolist()) + boundary_nodes_map['walls'] = walls_nodes + all_seen_nodes.update(walls_nodes) + print(f" - Loaded {len(walls_nodes)} nodes for boundary type: 'walls'.") + else: + print(f" - Warning: 'walls.csv' does not contain the expected column '{target_column_name}'. Skipping walls assignment.") + except pd.errors.EmptyDataError: + print(f" - Warning: 'walls.csv' is empty after skipping rows. Skipping walls assignment.") + except Exception as e: + print(f" - Error reading 'walls.csv': {e}. Skipping walls assignment.") + else: + print("\n'walls.csv' not found in the specified directory.") + + # Step 2: Process all other CSVs + print("\nProcessing other boundary CSVs (excluding walls.csv and everything.csv)...") + for filename in os.listdir(boundary_csv_folder_path): + if filename.endswith(".csv") and filename not in ["walls.csv", "everything.csv"]: + file_path = os.path.join(boundary_csv_folder_path, filename) + boundary_type = os.path.splitext(filename)[0] + try: + df = pd.read_csv(file_path, skiprows=5, header=0, engine='python') + if target_column_name in df.columns: + nodes_from_file = set(pd.to_numeric(df[target_column_name], errors='coerce').dropna().astype(int).tolist()) + filtered_nodes = nodes_from_file - walls_nodes + if filtered_nodes: + boundary_nodes_map[boundary_type] = filtered_nodes + all_seen_nodes.update(filtered_nodes) + print(f" - Loaded {len(filtered_nodes)} filtered nodes for boundary type: '{boundary_type}'. (Original: {len(nodes_from_file)})") + else: + print(f" - Warning: '{filename}' does not contain '{target_column_name}'. Skipping.") + except Exception as e: + print(f" - Error reading '{filename}': {e}. 
Skipping.") + elif filename == "everything.csv": + everything_csv_path = os.path.join(boundary_csv_folder_path, filename) + + # Step 3: Process 'everything.csv' + if everything_csv_path: + print(f"\nProcessing 'everything.csv' for interior nodes...") + try: + df_everything = pd.read_csv(everything_csv_path, skiprows=5, header=0, engine='python') + if target_column_name in df_everything.columns: + all_nodes_in_everything = set(pd.to_numeric(df_everything[target_column_name], errors='coerce').dropna().astype(int).tolist()) + interior_nodes = all_nodes_in_everything - all_seen_nodes + boundary_nodes_map['interior'] = interior_nodes + print(f" - Loaded {len(interior_nodes)} unique interior nodes from 'everything.csv'.") + else: + print(f" - Warning: 'everything.csv' does not contain '{target_column_name}'.") + except Exception as e: + print(f" - Error reading 'everything.csv': {e}.") + else: + print("\n'everything.csv' not found.") + + print("\nFinished loading boundary nodes from CSVs.") + return boundary_nodes_map + +def load_velocity_lookup(csv_path: str) -> Dict[str, float]: + velocity_lookup = {} + if not os.path.exists(csv_path): + print(f"Warning: Velocity lookup file not found at {csv_path}") + return velocity_lookup + try: + df = pd.read_csv(csv_path) + velocity_lookup = {row['Design Points'].strip(): float(row['Inlet velocity']) for _, row in df.iterrows()} + print(f"Loaded velocity lookup table with {len(velocity_lookup)} design points.") + except Exception as e: + print(f"Error loading velocity lookup from {csv_path}: {e}") + return velocity_lookup + +def get_design_point_info(folder_name: str, velocity_lookup: Dict[str, float]) -> Tuple[str, float]: + # <<< FIXED: Corrected regex to get the full DP name, e.g., 'DP1' >>> + dp_match = re.search(r'(DP\d+)', folder_name) + design_point = dp_match.group(1) if dp_match else 'DP_Unknown' + velocity = velocity_lookup.get(design_point, 0.0) + if velocity == 0.0 and design_point != 'DP_Unknown': + print(f"Warning: No velocity found for {design_point} in lookup table.") + return design_point, velocity + + +# <<< FIXED: Rewritten to parse the new CSV format >>> +def read_velocity_csv_file(csv_file_path: str) -> Tuple[List[int], List[List[float]], List[List[float]]]: + """ + Reads a time-stepped velocity CSV file with the new column format. + """ + print(f" Reading file: {os.path.basename(csv_file_path)}") + try: + # The data starts after the '[Data]' line, so we skip until we find it. + with open(csv_file_path, 'r') as f: + lines = f.readlines() + + data_start_index = -1 + for i, line in enumerate(lines): + if '[Data]' in line: + data_start_index = i + 1 + break + + if data_start_index == -1: + print(" Warning: '[Data]' section not found. Cannot read file.") + return [], [], [] + + # The line after '[Data]' is the header. 
+ header_line = lines[data_start_index] + data_lines = lines[data_start_index + 1:] + + # Use pandas to parse the data from the identified lines + df = pd.read_csv(StringIO("".join(data_lines)), header=None, sep=',\s*', engine='python') + + # Assign column names based on the header line for clarity (optional but good practice) + df.columns = [h.strip() for h in header_line.split(',')] + + # Select columns based on their position (0-indexed) as per your requirement + # X, Y, Z are columns 0, 1, 2 + # Domain Node Number is column 3 + # Velocity u, v, w are columns 5, 6, 7 + coordinates = df.iloc[:, [0, 1, 2]].values.tolist() + nodes = df.iloc[:, 3].astype(int).tolist() + velocities = df.iloc[:, [5, 6, 7]].values.tolist() + + except Exception as e: + print(f" An error occurred while reading {csv_file_path}: {e}") + return [], [], [] + + print(f" Successfully read {len(nodes)} nodes.") + return nodes, coordinates, velocities + +def create_node_features( + velocity_node_ids: List[int], + boundary_nodes_map: Dict[str, Set[int]] +) -> Tuple[torch.Tensor, Dict[str, int], Dict[str, str]]: + # This function appears correct and has been kept as is. + # ... (function content is unchanged) ... + all_node_types = sorted(list(boundary_nodes_map.keys())) + class_to_index = {name: i for i, name in enumerate(all_node_types)} + num_classes = len(all_node_types) + feature_vectors = [] + node_type_counts = defaultdict(int) + + for node_id in velocity_node_ids: + vector = [0] * num_classes + assigned_type = "interior" # Default + for boundary_type_name in all_node_types: + if node_id in boundary_nodes_map.get(boundary_type_name, set()): + assigned_type = boundary_type_name + break + + index = class_to_index[assigned_type] + vector[index] = 1 + node_type_counts[assigned_type] += 1 + feature_vectors.append(vector) + + final_zone_map = {name: name for name in all_node_types} + return torch.tensor(feature_vectors, dtype=torch.float32), dict(node_type_counts), final_zone_map + +def create_time_stepped_velocity_pairs(folder_path: str, boundary_nodes_map: Dict[str, Set[int]]) -> List[Dict]: + # This function appears correct and has been kept as is. + # ... (function content is unchanged) ... + print(f" Creating time-stepped velocity pairs from: {folder_path}") + csv_files = sorted([f for f in os.listdir(folder_path) if f.endswith('.csv')]) + + if len(csv_files) < 2: + print(f" Need at least 2 time steps, found {len(csv_files)}") + return [] + + trajectory_data = [] + + for i in range(len(csv_files) - 1): + current_file = csv_files[i] + target_file = csv_files[i + 1] + + current_path = os.path.join(folder_path, current_file) + target_path = os.path.join(folder_path, target_file) + + print(f" Processing time step {i+1}: {os.path.basename(current_file)} -> {os.path.basename(target_file)}") + + current_nodes, current_coordinates, current_velocities = read_velocity_csv_file(current_path) + target_nodes, _, target_velocities = read_velocity_csv_file(target_path) + + if not current_nodes or not target_nodes or current_nodes != target_nodes: + print(f" Warning: Node mismatch or empty data. 
Skipping step.") + continue + + node_type_tensor, node_type_counts, zone_map = create_node_features(current_nodes, boundary_nodes_map) + + step_data = { + 'step_number': i + 1, 'current_file': current_file, 'target_file': target_file, + 'node_ids': current_nodes, 'coordinates': current_coordinates, + 'current_velocities': current_velocities, 'target_velocities': target_velocities, + 'node_type_tensor': node_type_tensor, 'node_type_counts': node_type_counts, + 'num_nodes': len(current_nodes), 'zone_map': zone_map + } + trajectory_data.append(step_data) + + print(f" Created {len(trajectory_data)} time-stepped velocity pairs") + return trajectory_data + +def save_data_to_h5( + base_folder_path: str, + design_point_range: range, + boundary_csv_folder_path: str, + output_folder_path: str, + velocity_lookup_csv_path: str +): + device = torch.device('cpu') + os.makedirs(output_folder_path, exist_ok=True) + + print("="*60 + "\nSTEP 1: LOADING VELOCITY LOOKUP TABLE\n" + "="*60) + velocity_lookup = load_velocity_lookup(velocity_lookup_csv_path) + + print("\n" + "="*60 + "\nSTEP 2: READING MESH BOUNDARY INFORMATION\n" + "="*60) + target_column_name = ' Domain Node Number [ ]' + boundary_nodes_map = load_boundary_nodes_from_csvs(boundary_csv_folder_path, target_column_name) + if not boundary_nodes_map: + print("CRITICAL ERROR: Failed to read boundary nodes. Cannot proceed.") + return + + basename = "final_data_timestep" + h5_path = os.path.join(output_folder_path, f"{basename}_data.h5") + meta_json_path = os.path.join(output_folder_path, f"{basename}_data.json") + + print("\n" + "="*60 + "\nSTEP 3: PROCESSING TIME-STEPPED DATA\n" + "="*60) + + group_to_folder_map = {} + final_class_map = {} + group_counter = 0 + + with h5py.File(h5_path, 'w') as f: + for dp_num in design_point_range: + dp_folder = f"dataset_DP{dp_num}" + folder_path = os.path.join(base_folder_path, dp_folder) + + design_point, velocity = get_design_point_info(dp_folder, velocity_lookup) + + print(f"\n{'='*40}\nProcessing Design Point: {design_point} (Inlet Vel: {velocity} m/s)\n{'='*40}") + + if not os.path.exists(folder_path): + print(f"WARNING: Folder {folder_path} does not exist. 
Skipping...") + continue + + trajectory_data = create_time_stepped_velocity_pairs(folder_path, boundary_nodes_map) + + if not trajectory_data: + print(f"No valid trajectory data found in {dp_folder}") + continue + + if not final_class_map: + zone_map = trajectory_data[0].get('zone_map', {}) + final_class_map = {name: i for i, name in enumerate(sorted(list(set(zone_map.values()))))} + + # Each DP folder's trajectory becomes one group in the H5 file + group_name = f"group_{group_counter}" + dataset_group = f.create_group(group_name) + + num_steps = len(trajectory_data) + num_nodes = trajectory_data[0]['num_nodes'] + + # Stack all time steps into single arrays for this group + all_current_vel = torch.tensor([s['current_velocities'] for s in trajectory_data], dtype=torch.float32) + all_target_vel = torch.tensor([s['target_velocities'] for s in trajectory_data], dtype=torch.float32) + + # Static data (same for all steps in a trajectory) + coordinates = torch.tensor(trajectory_data[0]['coordinates'], dtype=torch.float32) + node_type = trajectory_data[0]['node_type_tensor'] + + dataset_group.create_dataset("current_velocities", data=all_current_vel.cpu().numpy()) + dataset_group.create_dataset("target_velocities", data=all_target_vel.cpu().numpy()) + dataset_group.create_dataset("coordinates", data=coordinates.cpu().numpy()) + dataset_group.create_dataset("node_type", data=node_type.cpu().numpy()) + + group_to_folder_map[group_name] = { + 'design_point': design_point, 'velocity': velocity, + 'source_folder': dp_folder, 'num_steps': num_steps, 'num_nodes': num_nodes, + 'node_type_counts': trajectory_data[0]['node_type_counts'] + } + + print(f" Saved {num_steps} time steps to H5 group: {group_name}") + group_counter += 1 + + metadata = { + "file_info": group_to_folder_map, + "class_map": final_class_map, # <<< FIXED: Use the populated class map + "total_groups": group_counter + } + + with open(meta_json_path, 'w') as json_file: + json.dump(metadata, json_file, indent=4) + + print("\n" + "="*60 + "\nPROCESSING COMPLETE\n" + "="*60) + print(f"Total groups created: {group_counter}") + print(f"H5 file saved to: {h5_path}") + print(f"Metadata saved to: {meta_json_path}") + +# --- Main Execution Block --- +def main(): + # --- CONFIGURATION --- + # Base folder path where all 'dataset_DPx' folders are located + BASE_FOLDER_PATH = r"C:\Users\accel\OneDrive\Desktop" + # Range of design points to process (e.g., range(1, 10) for DP1 to DP9) + DESIGN_POINT_RANGE = range(1, 10) + # Path to the DesignPoints.csv file for velocity lookup + VELOCITY_LOOKUP_PATH = r"d:\Python_and_machine_learning_tutorials\DesignPoints.csv" + # Path to the folder containing boundary definition CSVs + BOUNDARY_CSV_PATH = r"D:\data_validation\node_type_timestepped" + # Path where the final H5 and JSON files will be saved + OUTPUT_FOLDER_PATH = r"D:\ANK_official\a_Dataset_generated\preprocessed_for_model\timestep_data" + + save_data_to_h5( + BASE_FOLDER_PATH, + DESIGN_POINT_RANGE, + BOUNDARY_CSV_PATH, + OUTPUT_FOLDER_PATH, + VELOCITY_LOOKUP_PATH + ) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/cleanroom/datasets/time_pt.py b/cleanroom/datasets/time_pt.py new file mode 100644 index 0000000000000000000000000000000000000000..23442e1e5bc218212574f2dcea11fda3224cb2a4 --- /dev/null +++ b/cleanroom/datasets/time_pt.py @@ -0,0 +1,133 @@ +import os +import h5py +import torch +import json +import numpy as np +import random +from typing import List, Dict, Any + +def create_pt_files_from_h5(h5_path: str, 
output_dir: str): + """ + Reads the consolidated time-stepped H5 file. + - Flattens the training data into individual time-step samples. + - Groups the testing data by full trajectory. + """ + print("="*60) + print("STARTING H5 TO .PT CONVERSION FOR TIME-STEPPED DATA") + print("="*60) + + # --- 1. SETUP PATHS AND LOAD METADATA --- + os.makedirs(output_dir, exist_ok=True) + meta_json_path = h5_path.replace('.h5', '.json') + + if not os.path.exists(h5_path): + raise FileNotFoundError(f"H5 file not found at: {h5_path}") + if not os.path.exists(meta_json_path): + raise FileNotFoundError(f"Metadata JSON file not found at: {meta_json_path}") + + with open(meta_json_path, 'r') as f: + metadata = json.load(f) + + file_info = metadata.get("file_info", {}) + group_names = sorted(list(file_info.keys())) + + if not group_names: + print("No groups found in the H5 file. Exiting.") + return + + print(f"Found {len(group_names)} total simulation trajectories in the metadata.") + + # --- 2. SPLIT DATASET --- + random.seed(42) # Use a fixed seed for reproducibility + random.shuffle(group_names) + + train_end_idx = round(len(group_names) * 0.8) + splits = { + 'train': group_names[:train_end_idx], + 'test': group_names[train_end_idx:] + } + + print("\nSplitting data into sets based on trajectories:") + for split_name, groups in splits.items(): + print(f" - {split_name.upper()} set: {len(groups)} trajectories") + + # --- 3. PROCESS EACH SPLIT AND SAVE TO A .PT FILE --- + with h5py.File(h5_path, 'r') as h5_file: + for split_name, groups_in_split in splits.items(): + if not groups_in_split: + print(f"\n--- No trajectories for '{split_name}' split. Skipping. ---") + continue + + print(f"\n--- Processing '{split_name}' split ---") + + final_data_list: List[Dict[str, Any]] = [] + + # <<< MODIFIED: Conditional logic based on the split type >>> + if split_name == 'train': + # --- FLATTENING LOGIC FOR THE TRAINING SET --- + print(" -> Applying FLATTENING for the training set.") + for group_name in groups_in_split: + if group_name not in h5_file: continue + group = h5_file[group_name] + meta_info = file_info.get(group_name, {}) + num_steps = meta_info.get('num_steps', 0) + + # Load static data once per trajectory + coordinates = torch.tensor(group['coordinates'][:], dtype=torch.float32) + node_type = torch.tensor(group['node_type'][:], dtype=torch.float32) + + # Load all time-varying data for the trajectory + current_velocities_all_steps = torch.tensor(group['current_velocities'][:], dtype=torch.float32) + target_velocities_all_steps = torch.tensor(group['target_velocities'][:], dtype=torch.float32) + + # Loop through each time-step and append it as an individual sample + for step_idx in range(num_steps): + sample_data = { + 'coordinates': coordinates, 'node_type': node_type, + 'current_velocities': current_velocities_all_steps[step_idx], + 'target_velocities': target_velocities_all_steps[step_idx], + 'meta_info': meta_info + } + final_data_list.append(sample_data) + + else: # For 'test' split + # --- GROUPING LOGIC FOR THE TESTING SET --- + print(" -> Applying GROUPING for the testing set.") + for group_name in groups_in_split: + if group_name not in h5_file: continue + group = h5_file[group_name] + meta_info = file_info.get(group_name, {}) + + # Load all data for the entire trajectory at once + trajectory_sample = { + 'coordinates': torch.tensor(group['coordinates'][:], dtype=torch.float32), + 'node_type': torch.tensor(group['node_type'][:], dtype=torch.float32), + 'current_velocities': 
torch.tensor(group['current_velocities'][:], dtype=torch.float32), + 'target_velocities': torch.tensor(group['target_velocities'][:], dtype=torch.float32), + 'meta_info': meta_info + } + final_data_list.append(trajectory_sample) + + # --- Saving Logic (same for both splits) --- + base_filename = os.path.splitext(os.path.basename(h5_path))[0] + output_pt_path = os.path.join(output_dir, f"{base_filename}_{split_name}.pt") + + torch.save(final_data_list, output_pt_path) + + if split_name == 'train': + print(f" -> Successfully saved {len(final_data_list)} FLATTENED time-step samples to: {output_pt_path}") + else: + print(f" -> Successfully saved {len(final_data_list)} GROUPED trajectories to: {output_pt_path}") + + print("\n" + "="*60) + print("CONVERSION COMPLETE") + print(f"Output .pt files are located in: {output_dir}") + print("="*60) + +def main(): + H5_FILE_PATH = r"D:\ANK_official\a_Dataset_generated\preprocessed_for_model\timestep_data\final_data_timestep_data.h5" + PT_OUTPUT_DIR = r"D:\ANK_official\a_Dataset_generated\preprocessed_for_model\timestep_data\pt_files" + create_pt_files_from_h5(H5_FILE_PATH, PT_OUTPUT_DIR) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/cleanroom/output/best_model.pth b/cleanroom/output/best_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..18dca07d373a50eb463014d73be0342368bf2d8d --- /dev/null +++ b/cleanroom/output/best_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93e67b8b58b57fc50636264b46db19020ceb89747422dd84ba07653409d1748b +size 3078463 diff --git a/cleanroom/output/checkpoint_epoch_40.pth b/cleanroom/output/checkpoint_epoch_40.pth new file mode 100644 index 0000000000000000000000000000000000000000..607db1a90a62ea7fb1eb24f6cfcbc1a13e946ca2 --- /dev/null +++ b/cleanroom/output/checkpoint_epoch_40.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15e322deb50f285242ac56ed9389e5f3688bae833ae06f46e147ddc68aa8e8ff +size 9255562 diff --git a/cleanroom/output/checkpoint_epoch_400.pth b/cleanroom/output/checkpoint_epoch_400.pth new file mode 100644 index 0000000000000000000000000000000000000000..bb389d268a778182594574eed952e4cb192068e3 --- /dev/null +++ b/cleanroom/output/checkpoint_epoch_400.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ed5b0e59efcc604830cc14b959202fbd47d6289665bd5c161d3a5b00e83274f +size 9256082 diff --git a/cleanroom/output/checkpoint_epoch_50.pth b/cleanroom/output/checkpoint_epoch_50.pth new file mode 100644 index 0000000000000000000000000000000000000000..4bd2374028b69434f88312dacde9031874362f69 --- /dev/null +++ b/cleanroom/output/checkpoint_epoch_50.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cfa1a7d4162707545288ab50444852db74c131a8a2fa1c8a3ce216b1a0ddc06 +size 9255562 diff --git a/cleanroom/output/checkpoint_epoch_60.pth b/cleanroom/output/checkpoint_epoch_60.pth new file mode 100644 index 0000000000000000000000000000000000000000..7fa175eff9d949f5b96ab296344ef333d94031be --- /dev/null +++ b/cleanroom/output/checkpoint_epoch_60.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:152ed6539fe707d0f1b60010f5bed2933239977a16fbd2150e99a083097b67a6 +size 9255562 diff --git a/cleanroom/output/checkpoint_epoch_70.pth b/cleanroom/output/checkpoint_epoch_70.pth new file mode 100644 index 0000000000000000000000000000000000000000..b5aa2f04d5f1ee3e57a7105b9ca4e11579c0d1f1 --- /dev/null +++ b/cleanroom/output/checkpoint_epoch_70.pth @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:821a0c5f64fe913a4db2b04a49f618c0cf66680687a5ccfcbeb119f6b70d51ec +size 9255562 diff --git a/cleanroom/output/checkpoint_epoch_80.pth b/cleanroom/output/checkpoint_epoch_80.pth new file mode 100644 index 0000000000000000000000000000000000000000..6666c25adb40ad5b819b0bb4a4650f18be79902b --- /dev/null +++ b/cleanroom/output/checkpoint_epoch_80.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8953dace600898c7582513928d0fb7648a6c08c1c14e27c6875619152e55b4eb +size 9255562 diff --git a/cleanroom/output/checkpoint_epoch_90.pth b/cleanroom/output/checkpoint_epoch_90.pth new file mode 100644 index 0000000000000000000000000000000000000000..a3f24d44cb88b55f4bc325ac1c8039cf605eee8f --- /dev/null +++ b/cleanroom/output/checkpoint_epoch_90.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a54205076f9e4c63fc07012acba727a657245e8eb867e970ce21fd5582a00769 +size 9255562 diff --git a/cleanroom/output/checkpoint_latest.pth b/cleanroom/output/checkpoint_latest.pth new file mode 100644 index 0000000000000000000000000000000000000000..5c1e000ad481abcc539e407aaf660e4274729629 --- /dev/null +++ b/cleanroom/output/checkpoint_latest.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02577af750ee0fa9891d66aa4f4169ce44d4e25d12aeb9668da664df891b1c13 +size 9254522 diff --git a/cleanroom/output/loss_history.json b/cleanroom/output/loss_history.json new file mode 100644 index 0000000000000000000000000000000000000000..d4604a57c312004967aef1bddf5a74c00a1227b2 --- /dev/null +++ b/cleanroom/output/loss_history.json @@ -0,0 +1,489 @@ +{ + "train_loss_history": [ + 0.047928659315781504, + 0.0006949022402482789, + 0.0010734537166794517, + 0.0006152855177427109, + 0.00011687942116370767, + 0.00020554457697606563, + 0.0007192346013056974, + 0.00011859725575276393, + 0.0003182351600021081, + 0.0005468394994454358, + 0.0004360543223084239, + 0.0012434385571134879, + 5.973176550695647e-05, + 4.7412790192547225e-05, + 6.238552469206092e-05, + 0.00015723898853543667, + 0.00010148409973134695, + 0.00014321584157705162, + 0.00012077949777101117, + 8.536530996812897e-05, + 7.980600606639531e-05, + 0.00024803338860627475, + 0.00010658709384645037, + 6.773578205252632e-05, + 8.155245115901062e-05, + 6.60438944421686e-05, + 8.051208146861989e-05, + 7.357162857343528e-05, + 4.69376460630831e-05, + 6.422285643879602e-05, + 0.00012228472015134831, + 4.85294423324115e-05, + 3.434202191389873e-05, + 3.2734662624173205e-05, + 3.0314821193253932e-05, + 2.9248124266170513e-05, + 3.204084148919763e-05, + 2.434612859384575e-05, + 2.451069366125621e-05, + 2.3173857902005884e-05, + 2.1990548752932158e-05, + 2.0399042922468635e-05, + 2.020785857047829e-05, + 1.952424931517632e-05, + 1.8617949248356566e-05, + 1.8276091769788955e-05, + 1.8054693970352412e-05, + 1.768256500231234e-05, + 1.7491002851009694e-05, + 1.784203906583427e-05, + 0.0076242006888470925, + 0.00013156365828885074, + 8.274292987331294e-05, + 0.00012027848812789905, + 0.00019276423797397942, + 6.441972457838112e-05, + 0.00015153984726888728, + 6.234324651535444e-05, + 0.0004435729861317111, + 5.55793773625202e-05, + 0.0005415625738942906, + 4.888701246338106e-05, + 5.984653692798907e-05, + 4.9954603169184836e-05, + 8.881673217106972e-05, + 0.0001422768891730533, + 0.00012575291818423914, + 0.00010410523897077433, + 6.085281701412182e-05, + 4.9238619450997765e-05, + 9.720630484380264e-05, + 0.00025967211489990113, + 9.572093290988129e-05, + 
0.00019708159576029257, + 0.0001653077016558918, + 3.5846492350452734e-05, + 5.598483791830852e-05, + 5.330636422209019e-05, + 4.37195933859345e-05, + 0.00010271433415403117, + 0.00010793821548284967, + 9.543247569253032e-05, + 4.5204619508429585e-05, + 5.153431960386232e-05, + 0.00024224689295958765, + 3.3395609312702494e-05, + 4.77683986707308e-05, + 3.852387731841413e-05, + 2.8161190483620868e-05, + 3.933131680811852e-05, + 4.062870401580049e-05, + 0.00017846101365831562, + 6.78482065757887e-05, + 3.034723103151205e-05, + 5.021901810623734e-05, + 0.00022681239045413388, + 2.8216781784480688e-05, + 2.6548571810688887e-05, + 3.366329710995582e-05, + 4.041387116371205e-05, + 3.4067415006784056e-05, + 2.7742833667203243e-05, + 6.532883340309275e-05, + 2.7837519277428594e-05, + 3.822202927440404e-05, + 2.464142943190991e-05, + 2.5300616486988017e-05, + 2.7211633015652528e-05, + 2.318644450253539e-05, + 2.7594858429917e-05, + 3.7894345847021716e-05, + 3.1579057753133293e-05, + 2.2764573279463717e-05, + 2.317715486436026e-05, + 2.5766221517261637e-05, + 2.5689236573971663e-05, + 2.3766055815313853e-05, + 2.7410001256869053e-05, + 2.0527946976776483e-05, + 2.0467833745501223e-05, + 2.124201783232546e-05, + 2.137484773193659e-05, + 2.0112095912366658e-05, + 2.261692369078072e-05, + 5.7032989318257754e-05, + 1.8558992160001144e-05, + 1.8634256803853927e-05, + 1.806864532425337e-05, + 1.8156596254585745e-05, + 1.8091417579514014e-05, + 1.8572081171768198e-05, + 1.7913417134346917e-05, + 1.7492383150648237e-05, + 1.7677787195462556e-05, + 1.73300238979542e-05, + 1.7837312834595894e-05, + 1.7356072771200816e-05, + 1.7050836430789178e-05, + 1.698847618460591e-05, + 1.6854014483689325e-05, + 1.707957593056014e-05, + 1.660490888709458e-05, + 1.6581803887322567e-05, + 1.6509257679208924e-05, + 1.6578573131368468e-05, + 1.68111487807489e-05, + 1.625855646173686e-05, + 1.621479530727489e-05, + 1.6241659430466676e-05, + 1.6177183388600154e-05, + 0.0010911367265671898, + 7.442433320321605e-05, + 7.933682587663585e-05, + 3.3475778324597203e-05, + 3.446860121271586e-05, + 6.608223006813414e-05, + 5.027403948736427e-05, + 0.00015239930853480751, + 0.00021222714591882696, + 2.8011672265726905e-05, + 3.4801408058198384e-05, + 3.668981861900077e-05, + 4.312667793461453e-05, + 4.0065555824541974e-05, + 4.2022302735179974e-05, + 0.00016701333634611736, + 7.104454455887742e-05, + 9.122977652162101e-05, + 2.591940750045813e-05, + 4.095985418622219e-05, + 3.375768806348022e-05, + 6.485568052633308e-05, + 3.7554104417061105e-05, + 4.4857958994869384e-05, + 0.0001445643237553868, + 0.00023311414374759036, + 2.7123339008361498e-05, + 7.22349678005304e-05, + 3.277888079115551e-05, + 2.6256417674704915e-05, + 4.1481853985210345e-05, + 4.5524668604103905e-05, + 3.747794682402111e-05, + 3.439817989088084e-05, + 3.17700941844049e-05, + 5.616244556239634e-05, + 9.871728326597542e-05, + 3.675560587875329e-05, + 3.64914291678456e-05, + 5.720040345560725e-05, + 6.909822007892826e-05, + 3.805226023932106e-05, + 5.7736335827321976e-05, + 6.396610678183732e-05, + 4.896001249316819e-05, + 0.00015144073469646477, + 2.6723251167957938e-05, + 8.927947137157102e-05, + 2.316890464536e-05, + 2.473204444139414e-05, + 2.444031212328692e-05, + 2.5544984258598752e-05, + 2.6277840727756807e-05, + 2.819937620451559e-05, + 0.000162414775218711, + 8.753876606194167e-05, + 2.4463708219022855e-05, + 2.288677669465517e-05, + 2.6474674261938828e-05, + 2.2370629401328396e-05, + 2.6407884374574325e-05, + 2.7745542928441777e-05, + 2.5059742559944477e-05, 
+ 2.7253926839257996e-05, + 5.304826753439574e-05, + 0.00013340591717200082, + 5.203852989209923e-05, + 2.7994609449945673e-05, + 2.4307017401168884e-05, + 2.2538082447046176e-05, + 2.141573808826665e-05, + 2.6187374550661833e-05, + 3.089505480167219e-05, + 2.696544395587951e-05, + 3.93762534873383e-05, + 7.591918548797458e-05, + 2.8971471138493362e-05, + 2.4664353550483547e-05, + 2.1681920947574316e-05, + 2.3474315852990693e-05, + 7.545616191317565e-05, + 2.0951490840022317e-05, + 2.183390266241751e-05, + 2.3465598384351133e-05, + 2.4206859313454823e-05, + 1.905145491722204e-05, + 5.650045535563358e-05, + 2.0850099450294844e-05, + 2.084959590082903e-05, + 2.5635879509401707e-05, + 4.0397699890991354e-05, + 2.869608287914962e-05, + 1.954651972464185e-05, + 1.9960793675466808e-05, + 2.427598328814997e-05, + 3.9527793762718094e-05, + 1.9140237479011363e-05, + 1.9312606206515274e-05, + 2.7227431550301027e-05, + 2.4682951935741776e-05, + 1.9616607208341635e-05, + 2.008559867631861e-05, + 5.7385208414303265e-05, + 2.0489923385823343e-05, + 1.7063187350616723e-05, + 1.8679559377633442e-05, + 1.811177158641074e-05, + 1.9339116449099098e-05, + 2.0741530064562244e-05, + 2.322485317963757e-05, + 1.7405349632299563e-05, + 2.268058225805736e-05, + 2.041256734367306e-05, + 1.8847526312063773e-05, + 2.2477544341948458e-05, + 2.067150526990621e-05, + 1.7823933413881553e-05, + 2.0719849484169062e-05, + 1.8576113965380274e-05, + 1.857739854750091e-05, + 1.6965552341002116e-05, + 2.1370908398560343e-05, + 1.9348411941874392e-05, + 1.755121038034926e-05, + 2.088755237765505e-05, + 1.7965279188171156e-05, + 2.7262071336026652e-05, + 1.6599874017986025e-05, + 1.6918255245341613e-05, + 1.6440932010190523e-05, + 1.6970141357258955e-05, + 1.7509820256408597e-05, + 1.8451085099642248e-05, + 1.7665876169175e-05, + 1.6037526937620178e-05, + 1.724864239676073e-05, + 1.6443012690273828e-05, + 1.7080887233167312e-05, + 1.620907245982006e-05, + 1.8693770192095887e-05, + 1.609925264479837e-05, + 1.6967979909102932e-05, + 1.6053024607668032e-05, + 1.5949588553364688e-05, + 1.618354345975569e-05, + 1.6426505992969754e-05, + 1.5674319225411997e-05, + 1.593089580476741e-05, + 1.5978005228888786e-05, + 1.657686236965861e-05, + 1.5711943789299628e-05, + 1.6019769665246765e-05, + 1.561672781678537e-05, + 1.5991263429652515e-05, + 1.5585397933451253e-05, + 1.6186086417005036e-05, + 1.569130720846417e-05, + 1.5789657821918652e-05, + 1.5977488440481928e-05, + 1.5566254633172952e-05, + 1.544200677234798e-05, + 1.5320198310942316e-05, + 1.5242203481081373e-05, + 1.5205030934836623e-05, + 1.5297691105800686e-05, + 1.5185668236368117e-05, + 1.5071697819476784e-05, + 1.5170367902056529e-05, + 1.518320507748764e-05, + 1.5004031786818805e-05, + 1.5559591796289986e-05, + 1.5063847841244216e-05, + 1.502392449781241e-05, + 1.4957866932848394e-05, + 1.4964722523656886e-05, + 1.490988087434896e-05, + 1.5115838955603177e-05, + 1.4903098299659934e-05, + 1.5295822775493636e-05, + 1.4886948107144336e-05, + 1.4856360331914054e-05, + 1.4833782612375338e-05, + 1.4816578115460506e-05, + 1.5170217258089266e-05, + 1.4799613546321354e-05, + 1.51262711540448e-05, + 1.479400445873036e-05, + 1.4746148482247321e-05, + 1.4753102456774848e-05, + 1.4719250924042137e-05, + 1.4740951366790565e-05, + 1.4707885712818114e-05, + 1.5169788224022089e-05, + 1.4683611760978582e-05, + 1.4682420831314286e-05, + 1.4673869787014212e-05, + 1.4698160640727585e-05, + 1.4668882578120612e-05, + 1.4720104903088042e-05, + 1.466869959616354e-05, + 0.00028052000085903423, + 
2.6649565911154792e-05, + 2.4657888361703675e-05, + 3.0153244282109227e-05, + 5.649514268865738e-05, + 3.524949845182403e-05, + 3.446956838880096e-05, + 2.1749216474284205e-05, + 6.591638436473302e-05, + 2.5616885818418054e-05, + 2.396918703241542e-05, + 6.274653559814666e-05, + 3.235189944155244e-05, + 3.7791815526235265e-05, + 5.1199640882294515e-05, + 0.0001316607265260628, + 2.0101025679933633e-05, + 2.0187760754558556e-05, + 2.128763493542835e-05, + 1.9942122932344063e-05, + 2.8172145191188303e-05, + 2.7934660590799293e-05, + 2.3972071922411637e-05, + 2.7624952221422543e-05, + 4.4610551211652035e-05, + 5.372790423732665e-05, + 3.2932565484783805e-05, + 1.9327159422886777e-05, + 4.855859273530686e-05, + 3.621025223419217e-05, + 1.903905027927576e-05, + 2.841011406595655e-05, + 3.781146788503139e-05, + 2.502001833465729e-05, + 8.768400237796375e-05, + 1.8553423883945058e-05, + 2.0228494800462027e-05, + 2.2526833169810773e-05, + 2.1763349746163984e-05, + 2.311143349184174e-05, + 0.0001538188568389795, + 2.1988569620658886e-05, + 5.817659435645523e-05, + 2.0074219074530355e-05, + 1.8935993287078945e-05, + 3.496657699690949e-05, + 2.9860211687930184e-05, + 2.324121890254309e-05, + 2.176403506021298e-05, + 2.2414497434336783e-05 + ], + "val_loss_history": [ + 0.5926774516701698, + 0.17820684611797333, + 0.17300713807344437, + 0.192041527479887, + 0.19655945524573326, + 0.395011343061924, + 0.3089020736515522, + 0.32013382762670517, + 0.6825258433818817, + 0.2235833778977394, + 0.2483937107026577, + 0.28077036142349243, + 0.26743458956480026, + 0.28192055970430374, + 0.27316276729106903, + 0.2448822781443596, + 0.23578884452581406, + 0.15609030425548553, + 0.18058374896645546, + 0.323333278298378, + 0.37206557393074036, + 0.4064665026962757, + 1.4340878427028656, + 0.434962697327137, + 0.30037520825862885, + 0.3674027696251869, + 2.0432062298059464, + 0.3523652218282223, + 1.651973444968462, + 1.6599613651633263, + 2.4509262703359127, + 2.445054952055216, + 2.445206616073847, + 2.4445605911314487, + 2.4465204887092113, + 0.43507517874240875, + 0.2396019622683525, + 0.5704065803438425, + 0.25531776063144207, + 0.22239035367965698 + ], + "val_epochs": [ + 10, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 110, + 120, + 130, + 140, + 150, + 160, + 170, + 180, + 190, + 200, + 210, + 220, + 230, + 240, + 250, + 260, + 270, + 280, + 290, + 300, + 310, + 320, + 330, + 340, + 350, + 360, + 370, + 380, + 390, + 400 + ], + "best_val_loss": 0.15609030425548553 +} \ No newline at end of file diff --git a/cleanroom/output/rollout_results_epoch_40.npy b/cleanroom/output/rollout_results_epoch_40.npy new file mode 100644 index 0000000000000000000000000000000000000000..21f070b3f62839072935d5a9555ba4a64e36b9ea --- /dev/null +++ b/cleanroom/output/rollout_results_epoch_40.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0fa978cd9654d084118ee4da90a6b5cec4da81cfcbb43c10b00eef3c0d0acb2 +size 109707267 diff --git a/cleanroom/output/rollout_results_epoch_400.npy b/cleanroom/output/rollout_results_epoch_400.npy new file mode 100644 index 0000000000000000000000000000000000000000..9563494269509a290bab80c759d82802c7b2c12e --- /dev/null +++ b/cleanroom/output/rollout_results_epoch_400.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d5295fb465a5867905a7e30e12f9b48dff9c9a4bf50af3c0d5a215e6a57de5d +size 109707267 diff --git a/cleanroom/output/rollout_results_epoch_50.npy b/cleanroom/output/rollout_results_epoch_50.npy new file mode 100644 index 
0000000000000000000000000000000000000000..6d11d5e5a176eeb44f0b03c4077da88a8f508b59 --- /dev/null +++ b/cleanroom/output/rollout_results_epoch_50.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70748c9cba31b51811dcff1339c65d9083feb87f9b1e68aa014801c0dd41303b +size 109707267 diff --git a/cleanroom/output/rollout_results_epoch_60.npy b/cleanroom/output/rollout_results_epoch_60.npy new file mode 100644 index 0000000000000000000000000000000000000000..1702bbccaabcc97d507a6d5b888184907794be26 --- /dev/null +++ b/cleanroom/output/rollout_results_epoch_60.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6193de14401d0c36ea59aa4e214cf5947652f0c4435906ab74fdfe31964c2c80 +size 109707267 diff --git a/cleanroom/output/rollout_results_epoch_70.npy b/cleanroom/output/rollout_results_epoch_70.npy new file mode 100644 index 0000000000000000000000000000000000000000..204923d6a6d63e6c6fa7549822c5da6dca227bac --- /dev/null +++ b/cleanroom/output/rollout_results_epoch_70.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a41f25a182f1896c96f7a07fc8cc39278b44d601dc9a8dc3176badcfb7442470 +size 109707267 diff --git a/cleanroom/output/rollout_results_epoch_80.npy b/cleanroom/output/rollout_results_epoch_80.npy new file mode 100644 index 0000000000000000000000000000000000000000..5266619cb93902ddcbad98723e5c5c99e88323aa --- /dev/null +++ b/cleanroom/output/rollout_results_epoch_80.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ad6d95a01bb6291c769dbf3f562f49503dd7bcfe5fd475bde490a19c341acd1 +size 109707267 diff --git a/cleanroom/output/rollout_results_epoch_90.npy b/cleanroom/output/rollout_results_epoch_90.npy new file mode 100644 index 0000000000000000000000000000000000000000..43003904eb3e6acf6cd4848f4134509ceaaf8be4 --- /dev/null +++ b/cleanroom/output/rollout_results_epoch_90.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a7f39f8d2c9507557b4ab0d8a25c7789a67fdba1f0bed571dc899c17ad14adb +size 109707267 diff --git a/cleanroom/outputs_800/best_model.pth b/cleanroom/outputs_800/best_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..8a82bc0dddf45fdb8c859bdecfdc793cc5bb21c7 --- /dev/null +++ b/cleanroom/outputs_800/best_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8717c13b11879c442cabddc8018615c59408e64b5a4ce1d4d46234784f10d52 +size 11778111 diff --git a/cleanroom/outputs_800/checkpoint_epoch_70.pth b/cleanroom/outputs_800/checkpoint_epoch_70.pth new file mode 100644 index 0000000000000000000000000000000000000000..b701bf5b724588d35c8728a70225b3c12b338303 --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_70.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c430d968fd1d2aa2cd22a9edaf45aca7b462d57a29efc0b9424cc55190438ce +size 35356294 diff --git a/cleanroom/outputs_800/checkpoint_epoch_700.pth b/cleanroom/outputs_800/checkpoint_epoch_700.pth new file mode 100644 index 0000000000000000000000000000000000000000..6474a31fd7f9ff7118a44647a6ecdf256c904bbf --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_700.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21348c2db7541d3d5301cbd89962f3a7bd15bb5a98a6d022d79ebdb27206b3ca +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_710.pth b/cleanroom/outputs_800/checkpoint_epoch_710.pth new file mode 100644 index 0000000000000000000000000000000000000000..e9a2fc03f1267ffb93bf4da5aa45c88fee0bbd3d --- 
/dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_710.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef9d20a744cba38220d51865e33167f77b4eb186b48bb5aacba5cef8d55bc957 +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_720.pth b/cleanroom/outputs_800/checkpoint_epoch_720.pth new file mode 100644 index 0000000000000000000000000000000000000000..d879707f6eec1513794a04e79dc5e90359a03e1a --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_720.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9c444e0415adc3f2804de9ff23a8468aec17940f5f5e39dc7f0c90ce531fa2d +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_730.pth b/cleanroom/outputs_800/checkpoint_epoch_730.pth new file mode 100644 index 0000000000000000000000000000000000000000..81c2b0bd7cc2e1689da504bbc20168984bfdd655 --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_730.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:773b795b084a6ffe1a61ec7e148b6d3b8757986115dfcb2816a8d502322228b6 +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_740.pth b/cleanroom/outputs_800/checkpoint_epoch_740.pth new file mode 100644 index 0000000000000000000000000000000000000000..d22ec1dcef12e15b61fe52147c1a71de4aa61a07 --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_740.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8da3e2010e3704a22215e3f2c1a4ef4b8588662f37ac723d5449da62c4a1955c +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_750.pth b/cleanroom/outputs_800/checkpoint_epoch_750.pth new file mode 100644 index 0000000000000000000000000000000000000000..ee2ae7cbd5ff148802e75c4985944efe69015d6f --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_750.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eab82cdf1935a75b0b64950306dc0918691ad1916fa21e8f1df7c413cd108ece +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_760.pth b/cleanroom/outputs_800/checkpoint_epoch_760.pth new file mode 100644 index 0000000000000000000000000000000000000000..022eaa262ba9bc2bcf650d2c98b63ab7d048c906 --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_760.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7d99d7ce35da6f894c5d672429062cd628b9bc3b08d36153bf25d2bd6cdcf79 +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_770.pth b/cleanroom/outputs_800/checkpoint_epoch_770.pth new file mode 100644 index 0000000000000000000000000000000000000000..69d8e2a825081316d0cbb38a026d7417052c4e09 --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_770.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d47d7008cd437068bdc59386a31d4a15a75357959e8e4fec620db6de83b2001e +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_780.pth b/cleanroom/outputs_800/checkpoint_epoch_780.pth new file mode 100644 index 0000000000000000000000000000000000000000..e1ec662725b78ea5d237dfd382d43efedef1bbbd --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_780.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99ab7db208989ebcb6c26e93b06cdbd0bde2731ecb10acf73b5464d873a2d44a +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_790.pth b/cleanroom/outputs_800/checkpoint_epoch_790.pth new file mode 100644 index 0000000000000000000000000000000000000000..bc56eacaef68ad01159a2b78ee83645b10219a76 --- /dev/null +++ 
b/cleanroom/outputs_800/checkpoint_epoch_790.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d2a1928d16193b11f99cf5696dc44411eb2b3bde0ce7bd5d4b8b2ffd16fcce9 +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_80.pth b/cleanroom/outputs_800/checkpoint_epoch_80.pth new file mode 100644 index 0000000000000000000000000000000000000000..77286cd2b6b927c875d58d834b8972f2adcec58a --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_80.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:272ad4ef80d5f6a5db82206be06141ae93a0da24e6d418f128efdcf1166bd764 +size 35356294 diff --git a/cleanroom/outputs_800/checkpoint_epoch_800.pth b/cleanroom/outputs_800/checkpoint_epoch_800.pth new file mode 100644 index 0000000000000000000000000000000000000000..b3aea51543c0fe4a235d962eb93aaf4601dcb75f --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_800.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da2bd11e42603d5ca3db209a8215e535fe9401572d8d18ad18425947d33d81c2 +size 35356820 diff --git a/cleanroom/outputs_800/checkpoint_epoch_90.pth b/cleanroom/outputs_800/checkpoint_epoch_90.pth new file mode 100644 index 0000000000000000000000000000000000000000..d73c70ca96a1c629f3ce3197a6c636eca003fd37 --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_epoch_90.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74940f344a3523d9385733862b01a42c7f02674236cbc8b7b23c12d7f5edc8e5 +size 35356294 diff --git a/cleanroom/outputs_800/checkpoint_latest.pth b/cleanroom/outputs_800/checkpoint_latest.pth new file mode 100644 index 0000000000000000000000000000000000000000..593000304e9ea7702e78c5adf19030e1bbf0c2cb --- /dev/null +++ b/cleanroom/outputs_800/checkpoint_latest.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:574db7cfb89468162403cc9c02c2d090097e8bf25f3e89ce0e4ef7c171bb7daf +size 35355242 diff --git a/cleanroom/outputs_800/loss_history.json b/cleanroom/outputs_800/loss_history.json new file mode 100644 index 0000000000000000000000000000000000000000..b032f607513910b85a8acae40f7bcb6c7c5ca750 --- /dev/null +++ b/cleanroom/outputs_800/loss_history.json @@ -0,0 +1,1771 @@ +{ + "train_loss_history": [ + 0.07519881309104223, + 0.006257650850536514, + 0.005655151523043108, + 0.004111358297667934, + 0.0012229442612823858, + 0.0015542152705078757, + 0.0022238270944551428, + 0.00726080516762792, + 0.0006572933918731727, + 0.00032864267967760974, + 0.0007980327645782381, + 0.0006921060937928854, + 0.00028815311477294677, + 0.010275128146801444, + 0.00024284309358083635, + 0.000170050012061068, + 0.00015787620037572284, + 0.00014559812434287634, + 0.0002601015756696821, + 0.0001632957209525096, + 0.00015866604983971904, + 0.00124528430176672, + 0.00030520910756191746, + 0.0001270360754845974, + 0.00024291281371489366, + 0.00031856896781069674, + 0.0002150733924305601, + 0.0003516104864841696, + 0.00022525443558948243, + 0.00011490803129263484, + 0.00010679551775591394, + 0.0001344338256181913, + 0.00011439129905469008, + 0.00034610510698240306, + 0.00017800320404311992, + 0.000270937160804312, + 9.550520222224744e-05, + 9.224796544216997e-05, + 9.192669412922089e-05, + 8.885781269447379e-05, + 8.673247663209535e-05, + 8.825743644488436e-05, + 8.669495209861805e-05, + 8.513179815618799e-05, + 8.406834276096621e-05, + 8.460888272558466e-05, + 8.342986155750088e-05, + 8.295477701054687e-05, + 8.277187342423095e-05, + 8.381099533671093e-05, + 0.035422933904738746, + 0.0004252628482934675, + 
0.0006062945391527159, + 0.00030718961781757393, + 0.00031750700579353625, + 0.0007640872194419747, + 0.00027159566380024797, + 0.00022996015308673955, + 0.00024833802954206337, + 0.00044364374778733387, + 0.0007470576273977564, + 0.0002211746550224299, + 0.00021580162853390784, + 0.0013004561696102624, + 0.001029093809717287, + 0.0002544106457651686, + 0.0002108785811135509, + 0.0013078950816469098, + 0.0003512064580692178, + 0.00017522205472889698, + 0.00023451525230832682, + 0.0002561751456004715, + 0.0008311299960903278, + 0.00027109294298419514, + 0.0001558500102525818, + 0.0005234607661983475, + 0.0008319759207373707, + 0.0001754218727014044, + 0.00045799340651165606, + 0.00022132010849091919, + 0.0008758489419412517, + 0.0003462597592978885, + 0.00015475311837772924, + 0.0001497159711698912, + 0.00012747799512312212, + 0.0001455396334211435, + 0.00024425084222857725, + 0.00017605330286428898, + 0.0004178258151684344, + 0.00011957949976731382, + 0.00023881626414562883, + 0.00033720494184090753, + 0.00044905057149932556, + 0.0001199812615191171, + 0.00017127881007177186, + 0.00012499131216436014, + 0.00015106460453540187, + 0.00045908942795003504, + 0.00012861422531791484, + 0.00021456748365172038, + 0.00014073515475584745, + 0.00012155890482154499, + 0.00022641866447205384, + 0.0001147223493678155, + 0.0001196684028178407, + 0.0003815897699174396, + 0.0002366653099135404, + 0.000100989804540574, + 9.244756575279524e-05, + 9.39861539395309e-05, + 9.795369188443721e-05, + 0.00010723421733823822, + 0.00010365476198580945, + 9.428631530820995e-05, + 9.555300353725576e-05, + 9.608891233391046e-05, + 9.442413615083773e-05, + 0.00010822217056652574, + 0.0001021265688317787, + 9.125630444549987e-05, + 0.00010181061863942852, + 9.49676571249539e-05, + 9.086905509812523e-05, + 9.098630640006664e-05, + 8.721099695711227e-05, + 8.488192714935938e-05, + 8.401064521833317e-05, + 8.86755499486231e-05, + 8.730924190694642e-05, + 8.598201262347257e-05, + 8.084482624326483e-05, + 8.200556643198992e-05, + 8.195178330614121e-05, + 8.070206425408717e-05, + 8.020041073523446e-05, + 8.35882168497951e-05, + 8.021032487832734e-05, + 7.948110295615695e-05, + 7.9116949542437e-05, + 7.900211683603951e-05, + 7.89916162029492e-05, + 7.949342720608589e-05, + 7.843852823366672e-05, + 8.040153280401783e-05, + 7.915667977082437e-05, + 7.799151418785578e-05, + 7.787926835072468e-05, + 7.877991721666345e-05, + 7.784522760236528e-05, + 7.773312616988828e-05, + 0.006854034409266543, + 0.00020380999196198787, + 0.00019033925415013717, + 0.00022955964481010787, + 0.000244867994031787, + 0.00013786026014992077, + 0.00019335116159805023, + 0.00020138526563176165, + 0.00014055108343300206, + 0.00017870990535611694, + 0.0009997314953919345, + 0.0002642737358633964, + 0.00018605402512470278, + 0.0001404658827047687, + 0.0006514058841798181, + 0.00020967570138241847, + 0.00015657090620604876, + 0.00019747409697938305, + 0.00016477285060919063, + 0.0012532682364321099, + 0.00015469477477564358, + 0.00013041141782800942, + 0.00011689108654184056, + 0.00018002455821321733, + 0.0013856295973757944, + 0.00012120528819659348, + 0.00012984350429683239, + 0.00011424960018376933, + 0.00022069026227180503, + 0.0005060284600308238, + 0.0002062260925748361, + 0.00011348382882176064, + 0.0001354798249170552, + 0.00016344754875495692, + 0.0007586684214902779, + 0.00012123839655833201, + 0.00013387960372197456, + 0.00012629504251229017, + 0.00024779698955145515, + 0.00015355038407218862, + 0.0002652895607439407, + 0.0001671868387182423, + 
0.00023373882379908964, + 0.0003050713839650524, + 0.00018635113081638173, + 0.000164242637991996, + 0.00049976499383374, + 0.00033483860969633835, + 0.00013354369961077965, + 0.0001268553771436118, + 0.00016046769487033498, + 0.0001324155069840292, + 0.00011885167368180182, + 0.0005197155116404343, + 0.00016921929713543943, + 0.00010269206389904919, + 0.00016320957100228017, + 0.00016677975863742316, + 0.00030489307731118733, + 0.00012204708005075543, + 0.00016123033909799846, + 0.00018315738885210599, + 0.00020966195863135372, + 0.00012323404619534364, + 0.00013568217225633924, + 0.000157982617289392, + 0.00014995781569368817, + 0.00012772625325995155, + 0.00012265482792788078, + 0.00011983970240576497, + 0.0001512212102736166, + 0.000172962145949317, + 0.0001314206898862604, + 0.00010765132183158339, + 0.00017374679959627983, + 0.00017603895235644473, + 0.00011984614076771487, + 0.00011071705315527705, + 0.0001079624949922213, + 0.00021541317198897598, + 0.00011960401494895611, + 0.0001882056496891996, + 0.00011922569864766316, + 0.00012770743293813168, + 9.478098480065708e-05, + 0.00011202018704330909, + 0.0001018327892557846, + 0.00010044308165391518, + 0.00011341541556740294, + 0.00011540508025494713, + 0.0002952963879805615, + 0.0001138275581644465, + 0.00010994364398404861, + 0.00013564791456201203, + 0.0001031991296552375, + 9.897308510108123e-05, + 0.00012566362521723254, + 9.526035904111103e-05, + 9.884126498863307e-05, + 0.000106779542534785, + 0.00010460830676558335, + 9.567088000039803e-05, + 0.00012711180183354434, + 0.00014702955994352033, + 9.568958329222561e-05, + 9.044995785293243e-05, + 9.465527941419338e-05, + 9.482656971134642e-05, + 9.153212504555229e-05, + 0.00011094492531432236, + 0.00023842759980182796, + 8.592781142651714e-05, + 8.546860715069631e-05, + 8.584293883548648e-05, + 8.870801227321123e-05, + 8.41150135174888e-05, + 0.00010566133573231049, + 8.752028834111882e-05, + 0.00010566200958520316, + 9.257502090425744e-05, + 9.388505760685611e-05, + 8.388051142329289e-05, + 8.819311846294344e-05, + 9.803245612519359e-05, + 9.247088188860733e-05, + 8.521638866923196e-05, + 8.514232396166562e-05, + 0.00010790726106783972, + 9.076278650715839e-05, + 9.70394926744512e-05, + 8.311505856284745e-05, + 8.469605725504892e-05, + 8.595398964152382e-05, + 9.049834354766028e-05, + 8.192135030139762e-05, + 8.397925313510788e-05, + 8.24538695641505e-05, + 9.2356317829103e-05, + 8.339239863481022e-05, + 8.11927734008115e-05, + 8.188087033823305e-05, + 8.229374709060538e-05, + 8.272321808621826e-05, + 8.049861010979504e-05, + 8.881142862623834e-05, + 8.1452274478331e-05, + 7.981871753532539e-05, + 8.059461397547522e-05, + 8.553706908695463e-05, + 8.008664182622317e-05, + 8.006684695553639e-05, + 8.061655191077559e-05, + 8.03482403706871e-05, + 7.937193824630687e-05, + 8.168994735228264e-05, + 8.221674633342189e-05, + 7.892309556674964e-05, + 7.84184606040502e-05, + 7.902789170020914e-05, + 7.902968753720659e-05, + 7.880258805945715e-05, + 7.862852809799759e-05, + 7.797575183608322e-05, + 7.811269824551955e-05, + 7.824533400445691e-05, + 7.802393140977484e-05, + 7.841391995365545e-05, + 7.816426248606524e-05, + 7.827873811162466e-05, + 7.736384886305842e-05, + 7.748130071021343e-05, + 7.71457762873546e-05, + 7.730160311026386e-05, + 7.706409868668445e-05, + 7.763086394537535e-05, + 7.804112016337446e-05, + 7.694302512310771e-05, + 7.694270944678478e-05, + 7.679586186339436e-05, + 7.684678092760026e-05, + 7.680468907695734e-05, + 7.668499558482953e-05, + 7.762526571134164e-05, + 
7.892202400343734e-05, + 7.65269296553189e-05, + 7.645872856630233e-05, + 7.643197195733772e-05, + 7.711810052649452e-05, + 7.639755291969752e-05, + 7.652563436528525e-05, + 7.741350096143593e-05, + 7.650405197917837e-05, + 7.636868066443361e-05, + 7.624329089098995e-05, + 7.627691746181285e-05, + 7.622491502881408e-05, + 7.62063994225325e-05, + 7.636602726259724e-05, + 7.643870133243423e-05, + 7.62239778789731e-05, + 0.0018561123955126812, + 0.00022286474220316834, + 0.00014235435482291405, + 0.00046747776547456736, + 0.00017716906836213773, + 0.00014137076154251252, + 0.0001152492765344, + 0.00013814285052809352, + 0.00016402606575413657, + 0.00027681631627927077, + 0.00011655548139656733, + 0.00021360620451268392, + 0.00023464674282685767, + 0.00010679119447828862, + 0.00013808759638159768, + 0.00020582562514713634, + 0.00016348216509807584, + 0.0001524429811779468, + 0.0001157425921166978, + 0.0001458597712241007, + 0.00020526973559719156, + 0.00012880174794251603, + 0.00012221892661923731, + 0.0002644691610482965, + 0.0004216574832178977, + 0.00010205169634931274, + 0.00011096398841248032, + 0.00013768208041190728, + 0.00013989356159908002, + 0.0001659736264876053, + 0.00023932222461662257, + 0.0002924386729810676, + 0.00020371817127639178, + 0.00011918152108265666, + 0.00012329621496895811, + 0.00011345269112507377, + 0.00012185066698598282, + 0.0004387027192280854, + 0.00011069219295747239, + 0.00010724988122823404, + 0.00025008991317800625, + 0.00042352716825212045, + 0.0001057509878431089, + 0.00010067167142495054, + 0.00014816521387115572, + 0.00011225475610829967, + 0.0005473638241462388, + 9.434906052522355e-05, + 9.852566837322734e-05, + 0.00010340783035679376, + 0.00010382218918898594, + 0.00014309796114194147, + 0.00010063153979064136, + 0.00023629411172466274, + 0.00010060141709004167, + 0.00011939987698540651, + 0.00018771133102236665, + 0.000138675933125446, + 0.00027989060177161266, + 0.00011029602218509549, + 0.00011370835265430313, + 0.00010682277871294, + 0.00015765724159330443, + 0.00014371443656395033, + 0.00044101062342363865, + 0.00013033630415755737, + 0.00011196308803141511, + 0.00012907769291705683, + 9.588649428467708e-05, + 0.0001462096508125644, + 0.0002490016658240083, + 0.00011130243513252064, + 0.00012904920276488637, + 0.0001224638114162838, + 0.00012107260883467417, + 0.00019548082295993904, + 0.00012047733626682354, + 0.0001467630539943792, + 0.00015753552643261944, + 0.00013056306689407753, + 0.00021817133656288693, + 0.00018602095716150043, + 9.719294725830455e-05, + 0.00011461597037659406, + 0.00039603942276912173, + 0.0001304249552127771, + 8.990205519607394e-05, + 0.00010198978607236944, + 0.0001398989454965716, + 0.00011187999128209735, + 0.00013338928832281322, + 0.00011522883181914166, + 0.00011121841979029037, + 0.00016554914004243628, + 0.00011995466812141094, + 0.0001211741539170392, + 0.00011002969292923426, + 9.714925952146969e-05, + 0.00019702992287429487, + 0.00013786525714662554, + 9.541962138180018e-05, + 0.00015330689217280539, + 0.00011393035966043433, + 0.00012956252717731548, + 0.0003236702748196559, + 0.00015404807956012398, + 0.00010620584140905126, + 0.00010594406758260169, + 0.00010651767882828308, + 0.00011138698205917336, + 0.00017266878492147917, + 0.00011145915365569379, + 9.82942495732298e-05, + 0.00011638356644082892, + 0.00011907084794266236, + 0.00011125798601915884, + 0.0001293335780481696, + 0.0001111428827304722, + 0.00010799417924626789, + 0.0001169591219120288, + 0.00043094094646469534, + 8.602081467226404e-05, + 
8.753821134306371e-05, + 8.958172792665828e-05, + 9.97661583267072e-05, + 9.743748753201295e-05, + 9.16018681769882e-05, + 0.00011140699834367988, + 0.0001797581980573824, + 0.0002904860464665496, + 9.078897430884846e-05, + 8.754011876450831e-05, + 8.41096409092634e-05, + 0.00014457833687550988, + 9.988607990835677e-05, + 0.00012445600981572644, + 9.999158673658778e-05, + 0.00011223765072522903, + 0.00010352168586444189, + 0.0001259642428454606, + 0.00010318105010923684, + 9.430294822455774e-05, + 0.00013249571432525088, + 0.0003080547820602304, + 8.749021437953514e-05, + 8.736009477576715e-05, + 9.944132171565353e-05, + 0.00016472405907312244, + 9.150817305327331e-05, + 0.0001004633119965943, + 9.260813612038756e-05, + 9.18481420553015e-05, + 9.443015849239566e-05, + 0.0001403970016399529, + 0.00016925735687663113, + 0.00010691142133357548, + 0.0001290284467261103, + 0.00012762066304215308, + 0.00010631519136697979, + 8.697213153379889e-05, + 9.960668773512146e-05, + 9.202167132279489e-05, + 8.777344278945548e-05, + 0.00017464262686642186, + 9.079726474687937e-05, + 0.00010658608803887067, + 8.435317462038095e-05, + 0.00010214400906610292, + 9.435365466146775e-05, + 0.00017259611707592858, + 9.053949413588166e-05, + 9.05166300047935e-05, + 0.00020788802858110906, + 8.49651136596031e-05, + 8.396488006151426e-05, + 8.242723380368243e-05, + 8.683780224894659e-05, + 9.185094425416998e-05, + 0.00010717609935828917, + 0.000107265326695897, + 8.505553837146386e-05, + 9.738903875251325e-05, + 8.568520357393457e-05, + 9.183725359125936e-05, + 9.827761093174341e-05, + 9.52570926703072e-05, + 9.430262341527885e-05, + 0.00011421519040365103, + 0.00014762251763249227, + 9.251819549632908e-05, + 8.95506017819684e-05, + 0.00014312897198478778, + 8.277097310974133e-05, + 8.0237952091372e-05, + 8.287237011690409e-05, + 8.136195491618802e-05, + 0.00020567819820460577, + 8.417304411468823e-05, + 8.234680516423169e-05, + 8.502542707980069e-05, + 8.273514459975367e-05, + 8.320626526390993e-05, + 8.83559666469614e-05, + 0.0001051930801951352, + 0.00011871477235762797, + 8.398290062879615e-05, + 8.53800693542208e-05, + 8.517002068034066e-05, + 0.00011141758685451994, + 8.185707541601545e-05, + 8.521280188377109e-05, + 8.54539221511535e-05, + 8.473743889136962e-05, + 0.00010044234586291025, + 8.273557646123698e-05, + 8.684492772870413e-05, + 9.604356801584721e-05, + 9.66535814217005e-05, + 9.862779151889295e-05, + 8.467610903033729e-05, + 8.375479002439167e-05, + 8.4300596762133e-05, + 9.494023562035473e-05, + 9.223204952259778e-05, + 8.632859622217552e-05, + 8.230896243707033e-05, + 8.70715894716036e-05, + 9.971298637531457e-05, + 9.797150495556167e-05, + 8.835484972708848e-05, + 8.00266332910062e-05, + 8.143951879501227e-05, + 8.155504310250406e-05, + 8.514377082848335e-05, + 8.303479764575159e-05, + 8.318620910850468e-05, + 8.122030520285755e-05, + 8.169540754234561e-05, + 8.379351593509528e-05, + 8.458393167622193e-05, + 8.702058575014043e-05, + 0.00010090996255729932, + 0.00010310138322218675, + 7.742404518390929e-05, + 7.794788811661115e-05, + 8.658484216430572e-05, + 9.852454521553714e-05, + 7.839560861487949e-05, + 7.849317117116528e-05, + 7.984594376918187e-05, + 8.296840437531883e-05, + 8.032667790955842e-05, + 8.165530066212145e-05, + 7.959501711995785e-05, + 8.154871182920364e-05, + 7.971063325268924e-05, + 8.276264718156265e-05, + 8.01197057940675e-05, + 8.420854093299167e-05, + 7.847650343591162e-05, + 7.782609055010344e-05, + 8.173429837946943e-05, + 7.923072343789443e-05, + 8.147045273279253e-05, 
+ 8.013984330110844e-05, + 7.766065925480824e-05, + 7.925159343195058e-05, + 8.302821412746816e-05, + 8.313999647648481e-05, + 8.401274754742973e-05, + 7.996387147897805e-05, + 7.681105073836871e-05, + 7.916127944943791e-05, + 7.727105042931726e-05, + 8.307924045248566e-05, + 8.140457559081182e-05, + 7.992834958946314e-05, + 7.907238477909967e-05, + 7.841263343668275e-05, + 7.676041039811582e-05, + 7.665437674698265e-05, + 7.75740618709905e-05, + 7.670490066620219e-05, + 7.804907942022402e-05, + 7.722461567334285e-05, + 8.467047928968277e-05, + 7.919169772085689e-05, + 7.663124668062282e-05, + 7.749596644065317e-05, + 7.582260790003598e-05, + 7.648975188491919e-05, + 7.854537249915065e-05, + 7.703407302426023e-05, + 7.82461403550073e-05, + 7.617982536736349e-05, + 7.841892025407797e-05, + 7.968327200456186e-05, + 7.57045252546624e-05, + 7.553168027613263e-05, + 7.665170764657148e-05, + 7.70042561239834e-05, + 7.606278233807691e-05, + 7.575637655678514e-05, + 7.567413145555915e-05, + 7.559214063874379e-05, + 7.662813847459214e-05, + 7.637758689483644e-05, + 7.575982165730473e-05, + 7.783286029857815e-05, + 7.676720617298902e-05, + 7.577414675804458e-05, + 7.55818218678768e-05, + 7.641530390720144e-05, + 7.543347752165787e-05, + 7.535542893804906e-05, + 7.548439607794469e-05, + 7.51775800416523e-05, + 7.507529853865306e-05, + 7.49562714983504e-05, + 7.674085867202548e-05, + 7.742420151011061e-05, + 7.627960034457728e-05, + 7.525253140755287e-05, + 7.501507467172619e-05, + 7.463769388609393e-05, + 7.489126126515157e-05, + 7.717303711396716e-05, + 7.582799155315525e-05, + 7.496807734323477e-05, + 7.505612773676719e-05, + 7.458418569486361e-05, + 7.461465128258125e-05, + 7.550180788117624e-05, + 7.464820143082723e-05, + 7.450522755508599e-05, + 7.451150683773645e-05, + 7.538827656949643e-05, + 7.450022509216574e-05, + 7.453297188801169e-05, + 7.461885047415094e-05, + 7.514075906610568e-05, + 7.441328601820557e-05, + 7.53188151080801e-05, + 7.44932855993151e-05, + 7.445485033207187e-05, + 7.428758638435027e-05, + 7.420008211886016e-05, + 7.436482377619885e-05, + 7.41469508303664e-05, + 7.414629161193431e-05, + 7.586048121262839e-05, + 7.506002381746927e-05, + 7.40692406553485e-05, + 7.413331324343426e-05, + 7.406134579270925e-05, + 7.453887621515266e-05, + 7.40500776459066e-05, + 7.642978513655054e-05, + 7.401903853107479e-05, + 7.415243197606853e-05, + 7.493772750993221e-05, + 7.395980809699691e-05, + 7.415549374660154e-05, + 7.388413443931547e-05, + 7.388550899355305e-05, + 7.498676622144797e-05, + 7.400508595939198e-05, + 7.449901249257987e-05, + 7.38372916712077e-05, + 7.380564804912249e-05, + 7.627133773847132e-05, + 7.377171456266262e-05, + 7.375282130964845e-05, + 7.392713212084345e-05, + 7.438088166282158e-05, + 7.3745857717772e-05, + 7.371867697279347e-05, + 7.37227484196584e-05, + 7.371589587697625e-05, + 7.460232957347156e-05, + 7.39738154298344e-05, + 7.370498369590151e-05, + 7.36912536901376e-05, + 7.366883305793087e-05, + 7.397685822493269e-05, + 7.366121261954317e-05, + 7.366465048673771e-05, + 7.365113308345169e-05, + 7.366322612481424e-05, + 7.364180389680787e-05, + 7.5780084556984e-05, + 7.363501064881502e-05, + 7.363713098632824e-05, + 7.36282860485657e-05, + 7.380533873912649e-05, + 7.473383734071631e-05, + 7.366225412288606e-05, + 7.361981086513983e-05, + 7.362231830443165e-05, + 7.54261265379382e-05, + 0.0014170557611941624, + 0.0001048224812490352, + 9.741910171693522e-05, + 9.314769429771445e-05, + 0.0001411495370956491, + 0.00011312789891444177, + 0.0001516529734792791, 
+ 0.00010854645131367373, + 0.00010689634981727895, + 0.00010312110406343173, + 0.00016071926620715417, + 0.00011219654900711709, + 9.245735496194082e-05, + 0.00011484186142880525, + 0.00019364555676814422, + 0.00011496020356193811, + 0.00014997278060110457, + 0.00018571563526004835, + 0.00011500418312037441, + 0.0001027609657522376, + 0.0002695996449127391, + 9.008299952165108e-05, + 0.00013512230511073388, + 0.00011539664351120333, + 0.00010718898711797158, + 0.00010282605642444969, + 0.00012751558508092672, + 0.00012939017729598871, + 0.00010101392000473443, + 0.00031448595810446386, + 0.0001531049528903951, + 9.091183915732551e-05, + 9.791211455716769e-05, + 0.00011283258227825882, + 0.00013175915119085333, + 9.361928381509276e-05, + 0.00015302983789323396, + 0.000978522025591018, + 0.00010997753589396723, + 9.0289829625963e-05, + 9.5623065293189e-05, + 8.46389583019022e-05, + 8.951160814909713e-05, + 0.0001178159726496494, + 9.938975915103679e-05, + 9.979546636157065e-05, + 0.00011098514923623387, + 0.0001219589962819768, + 0.00016412067531602683, + 0.0001974668370837064 + ], + "val_loss_history": [ + 1.4662087708711624, + 0.3671293072402477, + 0.42298682779073715, + 0.48724716901779175, + 0.47657446563243866, + 1.4997726529836655, + 1.4417381882667542, + 0.5569983646273613, + 0.44321679323911667, + 0.623302735388279, + 0.32589947432279587, + 0.3333459757268429, + 0.2927282974123955, + 0.29977669939398766, + 0.29712890461087227, + 0.5140391960740089, + 0.4739374816417694, + 0.19094478711485863, + 0.20354301668703556, + 0.2503250576555729, + 0.28962912783026695, + 0.2535230331122875, + 0.25902972370386124, + 0.1596863567829132, + 0.16293449327349663, + 0.18274914473295212, + 0.19126713275909424, + 0.18204266764223576, + 0.180587375536561, + 0.1841256245970726, + 0.21082022786140442, + 0.21168163791298866, + 0.23078095354139805, + 0.23190549574792385, + 0.2319245282560587, + 0.1610123347491026, + 0.16598569508641958, + 0.24251367524266243, + 0.1516692228615284, + 0.15032847970724106, + 0.15773381013423204, + 0.1973406132310629, + 0.4602465331554413, + 0.15836439281702042, + 0.18262272514402866, + 0.1915278546512127, + 0.16450438275933266, + 0.1945173628628254, + 0.1910428050905466, + 0.1718407403677702, + 0.2201515156775713, + 0.16606492921710014, + 0.18291366659104824, + 0.20178397372364998, + 0.20669151842594147, + 0.19400428049266338, + 0.18760884553194046, + 0.1867864467203617, + 0.19706737250089645, + 0.19413182698190212, + 0.19664104841649532, + 0.20322608202695847, + 0.2092195376753807, + 0.21008620783686638, + 0.21810556389391422, + 0.2295383531600237, + 0.229572718963027, + 0.23636629432439804, + 0.24162410758435726, + 0.2487388700246811, + 0.2518967315554619, + 0.2528084386140108, + 0.25218342058360577, + 0.2537544909864664, + 0.253830473870039, + 0.39643141627311707, + 0.31004025787115097, + 0.27024124190211296, + 0.2565183751285076, + 0.25820692628622055 + ], + "val_epochs": [ + 10, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 110, + 120, + 130, + 140, + 150, + 160, + 170, + 180, + 190, + 200, + 210, + 220, + 230, + 240, + 250, + 260, + 270, + 280, + 290, + 300, + 310, + 320, + 330, + 340, + 350, + 360, + 370, + 380, + 390, + 400, + 410, + 420, + 430, + 440, + 450, + 460, + 470, + 480, + 490, + 500, + 510, + 520, + 530, + 540, + 550, + 560, + 570, + 580, + 590, + 600, + 610, + 620, + 630, + 640, + 650, + 660, + 670, + 680, + 690, + 700, + 710, + 720, + 730, + 740, + 750, + 760, + 770, + 780, + 790, + 800 + ], + "lr_history": [ + 0.0004995067807706465, + 
0.0004980290695935538, + 0.0004955726983196357, + 0.0004921473611241014, + 0.0004877665762479737, + 0.0004824476326477685, + 0.00047621152176388165, + 0.00046908285467696376, + 0.0004610897649792287, + 0.0004522637977440182, + 0.0004426397850318086, + 0.0004322557084239819, + 0.0004211525491268758, + 0.0004093741262376851, + 0.00039696692381050374, + 0.0003839799074050002, + 0.0003704643308417238, + 0.00035647353392668997, + 0.00034206273194353537, + 0.00032728879774401816, + 0.00031221003729685543, + 0.00029688595958070194, + 0.00028137704172939796, + 0.0002657444903563519, + 0.00025005000000000003, + 0.00023435550964364817, + 0.00021872295827060222, + 0.00020321404041929815, + 0.00018788996270314455, + 0.000172811202255982, + 0.00015803726805646483, + 0.0001436264660733101, + 0.00012963566915827625, + 0.00011612009259499975, + 0.00010313307618949638, + 9.0725873762315e-05, + 7.894745087312426e-05, + 6.784429157601825e-05, + 5.746021496819151e-05, + 4.783620225598192e-05, + 3.901023502077139e-05, + 3.1017145323036304e-05, + 2.3888478236118464e-05, + 1.7652367352231577e-05, + 1.2333423752026377e-05, + 7.952638875898691e-06, + 4.527301680364255e-06, + 2.070930406446283e-06, + 5.932192293535239e-07, + 0.0005, + 0.0004998766647634146, + 0.0004995067807706465, + 0.0004988907130525399, + 0.0004980290695935538, + 0.0004969227007317547, + 0.0004955726983196357, + 0.0004939803946465899, + 0.0004921473611241014, + 0.0004900754067349519, + 0.0004877665762479737, + 0.0004852231482001087, + 0.0004824476326477685, + 0.0004794427686897111, + 0.00047621152176388165, + 0.0004727570807208826, + 0.00046908285467696376, + 0.00046519246964963576, + 0.0004610897649792287, + 0.0004567787895399268, + 0.0004522637977440182, + 0.0004475492453433038, + 0.0004426397850318086, + 0.0004375402618541334, + 0.0004322557084239819, + 0.0004267913399575776, + 0.0004211525491268758, + 0.00041534490073764685, + 0.0004093741262376851, + 0.00040324611806056155, + 0.00039696692381050374, + 0.00039054274029414013, + 0.0003839799074050002, + 0.0003772849018668054, + 0.0003704643308417238, + 0.00036352492540989975, + 0.00035647353392668997, + 0.0003493171152641634, + 0.00034206273194353537, + 0.0003347175431653107, + 0.00032728879774401816, + 0.00031978382695450544, + 0.00031221003729685543, + 0.0003045749031870659, + 0.00029688595958070194, + 0.00028915079453680577, + 0.00028137704172939796, + 0.0002735723729139627, + 0.0002657444903563519, + 0.0002579011192315782, + 0.00025005000000000003, + 0.00024219888076842187, + 0.00023435550964364817, + 0.0002265276270860374, + 0.00021872295827060222, + 0.00021094920546319433, + 0.00020321404041929815, + 0.0001955250968129343, + 0.00018788996270314455, + 0.0001803161730454947, + 0.000172811202255982, + 0.00016538245683468946, + 0.00015803726805646483, + 0.0001507828847358366, + 0.0001436264660733101, + 0.00013657507459010032, + 0.00012963566915827625, + 0.0001228150981331947, + 0.00011612009259499975, + 0.00010955725970585995, + 0.00010313307618949638, + 9.685388193943852e-05, + 9.0725873762315e-05, + 8.475509926235327e-05, + 7.894745087312426e-05, + 7.330866004242248e-05, + 6.784429157601825e-05, + 6.255973814586664e-05, + 5.746021496819151e-05, + 5.255075465669619e-05, + 4.783620225598192e-05, + 4.3321210460073306e-05, + 3.901023502077139e-05, + 3.490753035036427e-05, + 3.1017145323036304e-05, + 2.7342919279117476e-05, + 2.3888478236118464e-05, + 2.0657231310288973e-05, + 1.7652367352231577e-05, + 1.487685179989135e-05, + 1.2333423752026377e-05, + 1.002459326504811e-05, + 
7.952638875898691e-06, + 6.119605353410107e-06, + 4.527301680364255e-06, + 3.177299268245342e-06, + 2.070930406446283e-06, + 1.209286947460155e-06, + 5.932192293535239e-07, + 2.2333523658538658e-07, + 0.0005, + 0.0004999691642887911, + 0.0004998766647634146, + 0.0004997225242467444, + 0.0004995067807706465, + 0.0004992294875665954, + 0.0004988907130525399, + 0.0004984905408160222, + 0.0004980290695935538, + 0.0004975064132462536, + 0.0004969227007317547, + 0.0004962780760723858, + 0.0004955726983196357, + 0.0004948067415149103, + 0.0004939803946465899, + 0.0004930938616033993, + 0.0004921473611241014, + 0.0004911411267435266, + 0.0004900754067349519, + 0.0004889504640488426, + 0.0004877665762479737, + 0.000486524035438945, + 0.0004852231482001087, + 0.00048386423550592544, + 0.0004824476326477685, + 0.00048097368915119617, + 0.0004794427686897111, + 0.0004778552489950296, + 0.00047621152176388165, + 0.00047451199256136594, + 0.0004727570807208826, + 0.000470947219240669, + 0.00046908285467696376, + 0.000467164447033826, + 0.00046519246964963576, + 0.0004631674090803054, + 0.0004610897649792287, + 0.00045896004997399923, + 0.0004567787895399268, + 0.00045454652187038464, + 0.0004522637977440182, + 0.0004499311803888484, + 0.0004475492453433038, + 0.00044511858031421556, + 0.0004426397850318086, + 0.00044011347110172784, + 0.0004375402618541334, + 0.0004349207921899035, + 0.0004322557084239819, + 0.0004295456681259091, + 0.0004267913399575776, + 0.000423993403508249, + 0.0004211525491268758, + 0.0004182694777517679, + 0.00041534490073764685, + 0.00041237953968012946, + 0.0004093741262376851, + 0.00040632940195110964, + 0.00040324611806056155, + 0.0004001250353202048, + 0.00039696692381050374, + 0.00039377256274821757, + 0.00039054274029414013, + 0.0003872782533586331, + 0.0003839799074050002, + 0.00038064851625075144, + 0.0003772849018668054, + 0.00037388989417468034, + 0.0003704643308417238, + 0.00036700905707443037, + 0.00036352492540989975, + 0.00036001279550548597, + 0.00035647353392668997, + 0.000352908013933347, + 0.0003493171152641634, + 0.00034570172391965427, + 0.00034206273194353537, + 0.00033840103720262537, + 0.0003347175431653107, + 0.0003310131586786275, + 0.00032728879774401816, + 0.00032354537929181443, + 0.00031978382695450544, + 0.00031600506883884495, + 0.00031221003729685543, + 0.00030839966869578364, + 0.0003045749031870659, + 0.0003007366844743604, + 0.00029688595958070194, + 0.0002930236786148384, + 0.00028915079453680577, + 0.00028526826292279886, + 0.00028137704172939796, + 0.0002774780910572068, + 0.0002735723729139627, + 0.0002696608509771749, + 0.0002657444903563519, + 0.0002618242573548752, + 0.0002579011192315782, + 0.0002539760439620896, + 0.00025005000000000003, + 0.0002461239560379105, + 0.00024219888076842187, + 0.00023827574264512487, + 0.00023435550964364817, + 0.00023043914902282523, + 0.0002265276270860374, + 0.00022262190894279326, + 0.00021872295827060222, + 0.0002148317370772013, + 0.00021094920546319433, + 0.0002070763213851616, + 0.00020321404041929815, + 0.00019936331552563975, + 0.0001955250968129343, + 0.00019170033130421643, + 0.00018788996270314455, + 0.0001840949311611551, + 0.0001803161730454947, + 0.0001765546207081857, + 0.000172811202255982, + 0.00016908684132137255, + 0.00016538245683468946, + 0.00016169896279737475, + 0.00015803726805646483, + 0.0001543982760803459, + 0.0001507828847358366, + 0.00014719198606665307, + 0.0001436264660733101, + 0.00014008720449451402, + 0.00013657507459010032, + 0.00013309094292556975, + 
0.00012963566915827625, + 0.00012621010582531973, + 0.0001228150981331947, + 0.00011945148374924862, + 0.00011612009259499975, + 0.00011282174664136695, + 0.00010955725970585995, + 0.00010632743725178254, + 0.00010313307618949638, + 9.997496467979533e-05, + 9.685388193943852e-05, + 9.37705980488905e-05, + 9.0725873762315e-05, + 8.772046031987063e-05, + 8.475509926235327e-05, + 8.183052224823215e-05, + 7.894745087312426e-05, + 7.610659649175105e-05, + 7.330866004242248e-05, + 7.0554331874091e-05, + 6.784429157601825e-05, + 6.517920781009649e-05, + 6.255973814586664e-05, + 5.9986528898272277e-05, + 5.746021496819151e-05, + 5.498141968578453e-05, + 5.255075465669619e-05, + 5.016881961115172e-05, + 4.783620225598192e-05, + 4.555347812961544e-05, + 4.3321210460073306e-05, + 4.113995002600091e-05, + 3.901023502077139e-05, + 3.6932590919694725e-05, + 3.490753035036427e-05, + 3.2935552966174087e-05, + 3.1017145323036304e-05, + 2.915278075933107e-05, + 2.7342919279117476e-05, + 2.5588007438634147e-05, + 2.3888478236118464e-05, + 2.2244751004970508e-05, + 2.0657231310288973e-05, + 1.9126310848803938e-05, + 1.7652367352231577e-05, + 1.623576449407465e-05, + 1.487685179989135e-05, + 1.3575964561055056e-05, + 1.2333423752026377e-05, + 1.1149535951157389e-05, + 1.002459326504811e-05, + 8.9588732564734e-06, + 7.952638875898691e-06, + 7.006138396600745e-06, + 6.119605353410107e-06, + 5.2932584850896236e-06, + 4.527301680364255e-06, + 3.821923927614272e-06, + 3.177299268245342e-06, + 2.5935867537464437e-06, + 2.070930406446283e-06, + 1.6094591839778604e-06, + 1.209286947460155e-06, + 8.705124334046655e-07, + 5.932192293535239e-07, + 3.7747575325562265e-07, + 2.2333523658538658e-07, + 1.308357112089361e-07, + 0.0005, + 0.0004999922909533153, + 0.0004999691642887911, + 0.0004999306214329893, + 0.0004998766647634146, + 0.0004998072976083687, + 0.0004997225242467444, + 0.000499622349907762, + 0.0004995067807706465, + 0.0004993758239642467, + 0.0004992294875665954, + 0.0004990677806044107, + 0.0004988907130525399, + 0.0004986982958333435, + 0.0004984905408160222, + 0.0004982674608158839, + 0.0004980290695935538, + 0.0004977753818541255, + 0.0004975064132462536, + 0.000497222180361189, + 0.0004969227007317547, + 0.000496607992831265, + 0.0004962780760723858, + 0.0004959329708059365, + 0.0004955726983196357, + 0.0004951972808367875, + 0.0004948067415149103, + 0.0004944011044443094, + 0.0004939803946465899, + 0.0004935446380731139, + 0.0004930938616033993, + 0.0004926280930434622, + 0.0004921473611241014, + 0.000491651695499126, + 0.0004911411267435266, + 0.0004906156863515891, + 0.0004900754067349519, + 0.0004895203212206068, + 0.0004889504640488426, + 0.0004883658703711344, + 0.0004877665762479737, + 0.0004871526186466449, + 0.000486524035438945, + 0.00048588086539884704, + 0.0004852231482001087, + 0.000484550924413825, + 0.00048386423550592544, + 0.00048316312383461644, + 0.0004824476326477685, + 0.00048171780608024823, + 0.00048097368915119617, + 0.0004802153277612496, + 0.0004794427686897111, + 0.0004786560595916635, + 0.0004778552489950296, + 0.00047704038629757913, + 0.00047621152176388165, + 0.00047536870652220586, + 0.00047451199256136594, + 0.0004736414327275144, + 0.0004727570807208826, + 0.00047185899109246794, + 0.000470947219240669, + 0.0004700218214078685, + 0.00046908285467696376, + 0.0004681303769678457, + 0.000467164447033826, + 0.00046618512445801283, + 0.00046519246964963576, + 0.00046418654384031924, + 0.0004631674090803054, + 0.00046213512823462693, + 0.0004610897649792287, + 
0.00046003138379704055, + 0.00045896004997399923, + 0.00045787582959502126, + 0.0004567787895399268, + 0.000455668997479314, + 0.00045454652187038464, + 0.00045341143195272183, + 0.0004522637977440182, + 0.00045110369003575765, + 0.0004499311803888484, + 0.0004487463411292082, + 0.0004475492453433038, + 0.0004463399668736422, + 0.00044511858031421556, + 0.0004438851610059001, + 0.0004426397850318086, + 0.00044138252921259745, + 0.00044011347110172784, + 0.0004388326889806817, + 0.0004375402618541334, + 0.00043623626944507595, + 0.0004349207921899035, + 0.00043359391123344967, + 0.0004322557084239819, + 0.00043090626630815294, + 0.0004295456681259091, + 0.0004281739978053551, + 0.0004267913399575776, + 0.00042539777987142546, + 0.000423993403508249, + 0.0004225782974965976, + 0.0004211525491268758, + 0.00041971624634595887, + 0.0004182694777517679, + 0.00041681233258780476, + 0.00041534490073764685, + 0.000413867272719403, + 0.00041237953968012946, + 0.0004108817933902081, + 0.0004093741262376851, + 0.000407856631222572, + 0.00040632940195110964, + 0.00040479253262999307, + 0.00040324611806056155, + 0.0004016902536329499, + 0.0004001250353202048, + 0.0003985505596723646, + 0.00039696692381050374, + 0.0003953742254207417, + 0.00039377256274821757, + 0.00039216203459102936, + 0.00039054274029414013, + 0.00038891477974324965, + 0.0003872782533586331, + 0.0003856332620889466, + 0.0003839799074050002, + 0.00038231829129349896, + 0.00038064851625075144, + 0.00037897068527634767, + 0.0003772849018668054, + 0.000375591270009186, + 0.00037388989417468034, + 0.000372180879312164, + 0.0003704643308417238, + 0.00036874035464815513, + 0.00036700905707443037, + 0.0003652705449151389, + 0.00036352492540989975, + 0.0003617723062367468, + 0.00036001279550548597, + 0.0003582465017510275, + 0.00035647353392668997, + 0.0003546940013974802, + 0.000352908013933347, + 0.00035111568170240984, + 0.0003493171152641634, + 0.0003475124255626577, + 0.00034570172391965427, + 0.0003438851220277596, + 0.00034206273194353537, + 0.0003402346660805861, + 0.00033840103720262537, + 0.0003365619584165194, + 0.0003347175431653107, + 0.0003328679052212197, + 0.0003310131586786275, + 0.0003291534179470375, + 0.00032728879774401816, + 0.00032541941308812677, + 0.00032354537929181443, + 0.00032166681195431327, + 0.00031978382695450544, + 0.0003178965404437755, + 0.00031600506883884495, + 0.0003141095288145925, + 0.00031221003729685543, + 0.0003103067114552181, + 0.00030839966869578364, + 0.00030648902665393255, + 0.0003045749031870659, + 0.0003026574163673355, + 0.0003007366844743604, + 0.00029881282598793134, + 0.00029688595958070194, + 0.0002949562041108685, + 0.0002930236786148384, + 0.00029108850229988734, + 0.00028915079453680577, + 0.000287210674852536, + 0.00028526826292279886, + 0.00028332367856471184, + 0.00028137704172939796, + 0.0002794284724945866, + 0.0002774780910572068, + 0.0002755260177259728, + 0.0002735723729139627, + 0.00027161727713119097, + 0.0002696608509771749, + 0.00026770321513349516, + 0.0002657444903563519, + 0.0002637847974691159, + 0.0002618242573548752, + 0.00025986299094897926, + 0.0002579011192315782, + 0.00025593876322016094, + 0.0002539760439620896, + 0.00025201308252713344, + 0.00025005000000000003, + 0.0002480869174728667, + 0.0002461239560379105, + 0.0002441612367798391, + 0.00024219888076842187, + 0.00024023700905102083, + 0.00023827574264512487, + 0.00023631520253088428, + 0.00023435550964364817, + 0.00023239678486650493, + 0.00023043914902282523, + 0.00022848272286880912, + 
0.0002265276270860374, + 0.00022457398227402727, + 0.00022262190894279326, + 0.00022067152750541354, + 0.00021872295827060222, + 0.0002167763214352883, + 0.0002148317370772013, + 0.00021288932514746414, + 0.00021094920546319433, + 0.0002090114977001127, + 0.0002070763213851616, + 0.00020514379588913153, + 0.00020321404041929815, + 0.00020128717401206878, + 0.00019936331552563975, + 0.00019744258363266472, + 0.0001955250968129343, + 0.00019361097334606763, + 0.00019170033130421643, + 0.00018979328854478198, + 0.00018788996270314455, + 0.0001859904711854075, + 0.0001840949311611551, + 0.00018220345955622473, + 0.0001803161730454947, + 0.00017843318804568685, + 0.0001765546207081857, + 0.0001746805869118734, + 0.000172811202255982, + 0.00017094658205296258, + 0.00016908684132137255, + 0.00016723209477878038, + 0.00016538245683468946, + 0.00016353804158348066, + 0.00016169896279737475, + 0.00015986533391941399, + 0.00015803726805646483, + 0.00015621487797224057, + 0.0001543982760803459, + 0.00015258757443734235, + 0.0001507828847358366, + 0.0001489843182975902, + 0.00014719198606665307, + 0.00014540599860251983, + 0.0001436264660733101, + 0.00014185349824897254, + 0.00014008720449451402, + 0.00013832769376325333, + 0.00013657507459010032, + 0.00013482945508486125, + 0.00013309094292556975, + 0.00013135964535184483, + 0.00012963566915827625, + 0.0001279191206878361, + 0.00012621010582531973, + 0.00012450872999081406, + 0.0001228150981331947, + 0.00012112931472365242, + 0.00011945148374924862, + 0.00011778170870650111, + 0.00011612009259499975, + 0.00011446673791105345, + 0.00011282174664136695, + 0.00011118522025675042, + 0.00010955725970585995, + 0.00010793796540897072, + 0.00010632743725178254, + 0.00010472577457925836, + 0.00010313307618949638, + 0.00010154944032763549, + 9.997496467979533e-05, + 9.840974636705015e-05, + 9.685388193943852e-05, + 9.530746737000698e-05, + 9.37705980488905e-05, + 9.224336877742805e-05, + 9.0725873762315e-05, + 8.921820660979198e-05, + 8.772046031987063e-05, + 8.623272728059715e-05, + 8.475509926235327e-05, + 8.328766741219535e-05, + 8.183052224823215e-05, + 8.038375365404121e-05, + 7.894745087312426e-05, + 7.752170250340243e-05, + 7.610659649175105e-05, + 7.470222012857463e-05, + 7.330866004242248e-05, + 7.192600219464493e-05, + 7.0554331874091e-05, + 6.919373369184713e-05, + 6.784429157601825e-05, + 6.650608876655037e-05, + 6.517920781009649e-05, + 6.386373055492406e-05, + 6.255973814586664e-05, + 6.126731101931838e-05, + 5.9986528898272277e-05, + 5.871747078740266e-05, + 5.746021496819151e-05, + 5.621483899410003e-05, + 5.498141968578453e-05, + 5.3760033126357784e-05, + 5.255075465669619e-05, + 5.135365887079188e-05, + 5.016881961115172e-05, + 4.8996309964242393e-05, + 4.783620225598192e-05, + 4.6688568047278314e-05, + 4.555347812961544e-05, + 4.443100252068613e-05, + 4.3321210460073306e-05, + 4.222417040497888e-05, + 4.113995002600091e-05, + 4.006861620295948e-05, + 3.901023502077139e-05, + 3.7964871765373185e-05, + 3.6932590919694725e-05, + 3.5913456159680876e-05, + 3.490753035036427e-05, + 3.391487554198727e-05, + 3.2935552966174087e-05, + 3.196962303215441e-05, + 3.1017145323036304e-05, + 3.007817859213163e-05, + 2.915278075933107e-05, + 2.8241008907532206e-05, + 2.7342919279117476e-05, + 2.6458567272485706e-05, + 2.5588007438634147e-05, + 2.4731293477794164e-05, + 2.3888478236118464e-05, + 2.3059613702420914e-05, + 2.2244751004970508e-05, + 2.1443940408336528e-05, + 2.0657231310288973e-05, + 1.9884672238750507e-05, + 1.9126310848803938e-05, + 
1.838219391975181e-05, + 1.7652367352231577e-05, + 1.693687616538366e-05, + 1.623576449407465e-05, + 1.5549075586175112e-05, + 1.487685179989135e-05, + 1.4219134601153039e-05, + 1.3575964561055056e-05, + 1.2947381353355154e-05, + 1.2333423752026377e-05, + 1.1734129628865652e-05, + 1.1149535951157389e-05, + 1.0579678779393314e-05, + 1.002459326504811e-05, + 9.484313648410866e-06, + 8.9588732564734e-06, + 8.44830450087403e-06, + 7.952638875898691e-06, + 7.471906956537812e-06, + 7.006138396600745e-06, + 6.5553619268861575e-06, + 6.119605353410107e-06, + 5.698895555690592e-06, + 5.2932584850896236e-06, + 4.902719163212554e-06, + 4.527301680364255e-06, + 4.167029194063519e-06, + 3.821923927614272e-06, + 3.4920071687349666e-06, + 3.177299268245342e-06, + 2.8778196388110964e-06, + 2.5935867537464437e-06, + 2.3246181458746172e-06, + 2.070930406446283e-06, + 1.8325391841161732e-06, + 1.6094591839778604e-06, + 1.4017041666564668e-06, + 1.209286947460155e-06, + 1.0322193955893331e-06, + 8.705124334046655e-07, + 7.241760357533027e-07, + 5.932192293535239e-07, + 4.776500922380439e-07, + 3.7747575325562265e-07, + 2.927023916313109e-07, + 2.2333523658538658e-07, + 1.693785670107607e-07, + 1.308357112089361e-07, + 1.0770904668482361e-07, + 0.0005, + 0.0004999980727308985, + 0.0004999922909533153, + 0.000499982654756412, + 0.0004999691642887911, + 0.0004999518197584921, + 0.0004999306214329893, + 0.0004999055696391869, + 0.0004998766647634146, + 0.0004998439072514214, + 0.0004998072976083687, + 0.0004997668363988224, + 0.0004997225242467444, + 0.0004996743618354829, + 0.000499622349907762, + 0.0004995664892656698, + 0.0004995067807706465, + 0.000499443225343471, + 0.0004993758239642467, + 0.0004993045776723862, + 0.0004992294875665954, + 0.0004991505548048565, + 0.0004990677806044107, + 0.0004989811662417383, + 0.0004988907130525399, + 0.0004987964224317156, + 0.0004986982958333435, + 0.0004985963347706572, + 0.0004984905408160222, + 0.0004983809156009122, + 0.0004982674608158839, + 0.00049815017821055, + 0.0004980290695935538, + 0.0004979041368325396, + 0.0004977753818541255, + 0.0004976428066438721, + 0.0004975064132462536, + 0.0004973662037646246, + 0.000497222180361189, + 0.0004970743452569655, + 0.0004969227007317547, + 0.0004967672491241025, + 0.000496607992831265, + 0.0004964449343091714, + 0.0004962780760723858, + 0.0004961074206940684, + 0.0004959329708059365, + 0.000495754729098223, + 0.0004955726983196357, + 0.0004953868812773144, + 0.0004951972808367875 + ], + "best_val_loss": 0.15032847970724106 +} \ No newline at end of file diff --git a/cleanroom/outputs_800/rollout_results_epoch_70.npy b/cleanroom/outputs_800/rollout_results_epoch_70.npy new file mode 100644 index 0000000000000000000000000000000000000000..dd4a7bd3b1568f29783bb7c22a6767e2586d5ebe --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_70.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53cef2865e68686bae64df65b4ae47130c008f6acd88595614c0b625ec7a2098 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_700.npy b/cleanroom/outputs_800/rollout_results_epoch_700.npy new file mode 100644 index 0000000000000000000000000000000000000000..a0f39d3ef82b529c3cf34ec78b925da43d78f382 --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_700.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99c2c1a3d5e4589524a207f2ed44fc978e5df7a57312942a542cfeb0f6e85b02 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_710.npy 
b/cleanroom/outputs_800/rollout_results_epoch_710.npy new file mode 100644 index 0000000000000000000000000000000000000000..7c70863a108fb08cc9fbd05f967ff35662dc1bd8 --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_710.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db296426d9235c3528cd3f751339dfbbe9dab93494e87aaa18aba62dd7d63266 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_720.npy b/cleanroom/outputs_800/rollout_results_epoch_720.npy new file mode 100644 index 0000000000000000000000000000000000000000..2b4dcf5032eeace10bc859061f414cf7818b307a --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_720.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1868910a3a64a8a8b49d8c8252c73a1aba7d4faef1e619c5d9fcf4b1b54317cd +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_730.npy b/cleanroom/outputs_800/rollout_results_epoch_730.npy new file mode 100644 index 0000000000000000000000000000000000000000..c0b772f77e764b6a153e5d2a08444022ff8ad017 --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_730.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc2200bb21bbdb3e9eaf5181395bf0f346c78e2c5585dd6f096a065229b70fbf +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_740.npy b/cleanroom/outputs_800/rollout_results_epoch_740.npy new file mode 100644 index 0000000000000000000000000000000000000000..449ae82877cbf322eadf96f4370c75800020543d --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_740.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47c85168f4f16b38090aadb434d05e4fe71acc1f99a976ca39a6a0350d8a4360 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_750.npy b/cleanroom/outputs_800/rollout_results_epoch_750.npy new file mode 100644 index 0000000000000000000000000000000000000000..0715279f6d43c68e61bf5ca922a42af46c70ef6f --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_750.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dd987208ca532b9f7183368fba4f216f3c9f8fa8b7b8c1904c9893d011da41f +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_760.npy b/cleanroom/outputs_800/rollout_results_epoch_760.npy new file mode 100644 index 0000000000000000000000000000000000000000..7a811d1e70d9f76a95744aca9d78878d2118191d --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_760.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cd4eb4ad34b4291efdd177f5c22c88fe8858b06814dd4acc00141cfa8a677d4 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_770.npy b/cleanroom/outputs_800/rollout_results_epoch_770.npy new file mode 100644 index 0000000000000000000000000000000000000000..342a921ef9a5dd15a02f7d882854b38eead40b3f --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_770.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:846cabc3177ce3061282cedb54e558596d526771fa995e2dea6829244475ff47 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_780.npy b/cleanroom/outputs_800/rollout_results_epoch_780.npy new file mode 100644 index 0000000000000000000000000000000000000000..7809110e7add85e614eec89fbbbba1cc1c653b85 --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_780.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa3f79f69f745a019cf60527a2ece78ea2bb12cd9077691da724bd7165baa752 
+size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_790.npy b/cleanroom/outputs_800/rollout_results_epoch_790.npy new file mode 100644 index 0000000000000000000000000000000000000000..a8ecdc8ae5d3990cb4fba9b6b5cbdfc00fcb3eb1 --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_790.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e84e59a3889dbff32c57828d3338831a27ecd7985572300de9b759dab86eba50 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_80.npy b/cleanroom/outputs_800/rollout_results_epoch_80.npy new file mode 100644 index 0000000000000000000000000000000000000000..df75b195507e73f977a16ea42e22cc124455083f --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_80.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e25c35728f350057dcfd458bf80e0b5218efecc490983f8eb413195a601c7f7f +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_800.npy b/cleanroom/outputs_800/rollout_results_epoch_800.npy new file mode 100644 index 0000000000000000000000000000000000000000..747a6b2406fd1a997419c67ce04a62b27fda05a0 --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_800.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f3f3d50b7dcf7a134a78f2c7cea598474e51befc100e650ee3dc90fef967706 +size 109707267 diff --git a/cleanroom/outputs_800/rollout_results_epoch_90.npy b/cleanroom/outputs_800/rollout_results_epoch_90.npy new file mode 100644 index 0000000000000000000000000000000000000000..958d42461b38d89c81605efebf8f3ee8c0c2031e --- /dev/null +++ b/cleanroom/outputs_800/rollout_results_epoch_90.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0e9d488f7bef9d7b070c53fda22668af00817d9f748e3c10187d31596bbc31a +size 109707267 diff --git a/cleanroom/rawdataset/final_data_timestep_data_test.pt b/cleanroom/rawdataset/final_data_timestep_data_test.pt new file mode 100644 index 0000000000000000000000000000000000000000..b315d446ec8ce3e5640127644efdbd826600835b --- /dev/null +++ b/cleanroom/rawdataset/final_data_timestep_data_test.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5de6e16a6f08c28d2c6bf8c8e829a54286fe98ea5caa0fd97ff875355e2816b +size 109697239 diff --git a/cleanroom/rawdataset/final_data_timestep_data_train.pt b/cleanroom/rawdataset/final_data_timestep_data_train.pt new file mode 100644 index 0000000000000000000000000000000000000000..7a1ed7235fcbcc17af585d1507908cd5383bdd69 --- /dev/null +++ b/cleanroom/rawdataset/final_data_timestep_data_train.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d54e6507bfffc00d8c1a8a67c02fbc73817073be55f9869fab3785184647401d +size 466356733 diff --git a/cleanroom/readingptfile.ipynb b/cleanroom/readingptfile.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..4f9acb87efdaba2bc36d23b19707eaa675604f31 --- /dev/null +++ b/cleanroom/readingptfile.ipynb @@ -0,0 +1,350 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "899c057f", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2522c5ba", + "metadata": {}, + "outputs": [], + "source": [ + "load = np.load(\"/home/gd_user1/AnK/project_PINN/cleanroom/outputs_800/rollout_results_epoch_800.npy\", allow_pickle=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5f52b647", + "metadata": {}, + "outputs": [ + { + "data": { + 
"text/plain": [ + "dict_keys(['ground_truth_trajectory', 'predicted_trajectory', 'coordinates', 'node_type', 'meta_info'])" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "load[2].keys()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7de70392", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[ 0.1196, -0.4992, -0.0659],\n", + " [ 0.1868, 0.2426, 0.0847],\n", + " [-0.0145, -0.5594, -0.0683],\n", + " ...,\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000]],\n", + "\n", + " [[ 0.1195, -0.4993, -0.0659],\n", + " [ 0.1867, 0.2427, 0.0849],\n", + " [-0.0145, -0.5594, -0.0683],\n", + " ...,\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000]],\n", + "\n", + " [[ 0.1195, -0.4993, -0.0659],\n", + " [ 0.1867, 0.2427, 0.0851],\n", + " [-0.0146, -0.5595, -0.0683],\n", + " ...,\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000]],\n", + "\n", + " ...,\n", + "\n", + " [[ 0.1211, -0.5029, -0.0667],\n", + " [ 0.1806, 0.2450, 0.0863],\n", + " [-0.0136, -0.5589, -0.0679],\n", + " ...,\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000]],\n", + "\n", + " [[ 0.1211, -0.5029, -0.0667],\n", + " [ 0.1807, 0.2450, 0.0860],\n", + " [-0.0134, -0.5588, -0.0680],\n", + " ...,\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000]],\n", + "\n", + " [[ 0.1211, -0.5030, -0.0667],\n", + " [ 0.1808, 0.2450, 0.0857],\n", + " [-0.0133, -0.5587, -0.0681],\n", + " ...,\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000],\n", + " [ 0.0000, 0.0000, 0.0000]]])" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "load[2]['ground_truth_trajectory'][1:]" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "da254464", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([48, 22626, 3])" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "load[2]['predicted_trajectory'][:-1].shape" + ] + }, + { + "cell_type": "markdown", + "id": "e1a0a625", + "metadata": {}, + "source": [ + "# Animation portion" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "6ffb64ae", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pyvista as pv\n", + "import matplotlib.pyplot as plt\n", + "from sklearn.metrics import r2_score\n", + "import torch\n" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "e76d49ea", + "metadata": {}, + "outputs": [], + "source": [ + "val_data= np.load(r'/home/gd_user1/AnK/project_PINN/cleanroom/outputs_800/rollout_results_epoch_800.npy', allow_pickle=True)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "f91018c7", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "4" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(val_data)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "81377251", + "metadata": {}, + "outputs": [], + "source": [ + "first_element=val_data[2]" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "012e8c43", + "metadata": {}, + "outputs": [ + { + 
"data": { + "text/plain": [ + "tensor([[ 7.3562e+00, 1.5680e+00, 3.2644e+00],\n", + " [ 5.9370e+00, 1.1662e+00, 3.9563e+00],\n", + " [ 6.5281e+00, 1.8776e+00, 2.4812e+00],\n", + " ...,\n", + " [-3.0000e-03, 2.5984e+00, 2.2542e+00],\n", + " [-3.0000e-03, 2.7853e+00, 2.1156e+00],\n", + " [-3.0000e-03, 2.7787e+00, 2.3665e+00]])" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + " ijk = first_element['coordinates']\n", + " ijk" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "743effdb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generating animation with error plot...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing frame for time step 43\n", + "GIF saved to: /home/gd_user1/AnK/project_PINN/cleanroom/animation/animation_testing.gif\n" + ] + } + ], + "source": [ + "\n", + "\n", + "gif_path_3 = \"/home/gd_user1/AnK/project_PINN/cleanroom/animation/animation_testing.gif\"\n", + "geom = pv.Arrow()\n", + "sargs_true = {'title': 'Ground Truth Velocity (m/s)'}\n", + "sargs_pred = {'title': 'Predicted Velocity (m/s)'}\n", + "# New scalar bar arguments for the error plot\n", + "sargs_error = {'title': 'Error Magnitude (m/s)'}\n", + "\n", + "# MODIFICATION: Changed shape to (1, 3) and increased window size\n", + "pl = pv.Plotter(shape=(1, 3), border=False, off_screen=True, window_size=[2400, 800])\n", + "pl.set_background(color=[242/255, 242/255, 242/255])\n", + "\n", + "# Camera setup\n", + "pl.link_views()\n", + "pl.show_axes()\n", + "pl.view_isometric()\n", + "\n", + "# Open GIF writer\n", + "pl.open_gif(gif_path_3)\n", + "print(\"Generating animation with error plot...\")\n", + "\n", + "for i in range(1, 46, 2):\n", + " # first_element = val_data[i]\n", + " gt_vel = first_element['ground_truth_trajectory'][i].cpu().numpy()\n", + " pred_velo = first_element['predicted_trajectory'][i-1].cpu().numpy()\n", + " ijk = first_element['coordinates'].cpu().numpy()\n", + " mesh = pv.PolyData(ijk)\n", + " \n", + " # --- Update mesh data for all vectors ---\n", + " mesh['vectors1'] = gt_vel\n", + " mesh['vectors2'] = pred_velo\n", + " \n", + " magnitudes = np.linalg.norm(gt_vel, axis=1)\n", + " mesh['scalars1'] = magnitudes\n", + " \n", + " magnitudes2 = np.linalg.norm(pred_velo, axis=1)\n", + " mesh['scalars2'] = magnitudes2\n", + " \n", + " # --- NEW: Calculate and add error data to the mesh ---\n", + " error_vectors = gt_vel - pred_velo\n", + " error_magnitudes = np.linalg.norm(error_vectors, axis=1)\n", + " mesh['error_vectors'] = error_vectors\n", + " mesh['error_magnitudes'] = error_magnitudes\n", + " \n", + " # --- Create all three sets of glyphs ---\n", + " glyphs = mesh.glyph(orient='vectors1', scale='scalars1', factor=2, geom=geom)\n", + " glyphs2 = mesh.glyph(orient='vectors2', scale='scalars2', factor=0.05, geom=geom)\n", + " # NEW: Create glyphs for the error vectors. 
Using a larger factor to make them visible.\n", + " error_glyphs = mesh.glyph(orient='error_vectors', scale='error_magnitudes', factor=10, geom=geom)\n", + " \n", + " # Clear previous meshes from all subplots\n", + " pl.clear()\n", + " \n", + " # --- Add meshes to the three subplots ---\n", + " # Subplot 1: Ground Truth\n", + " pl.subplot(0, 0)\n", + " pl.add_mesh(glyphs, show_scalar_bar=True, lighting=False, cmap='viridis', scalar_bar_args=sargs_true)\n", + " pl.add_text(f\"Ground Truth - Time: {i}\", position='upper_edge', font_size=10)\n", + " \n", + " # Subplot 2: Prediction\n", + " pl.subplot(0, 1)\n", + " pl.add_mesh(glyphs2, show_scalar_bar=True, lighting=False, cmap='viridis', scalar_bar_args=sargs_pred)\n", + " pl.add_text(f\"Prediction - Time: {i}\", position='upper_edge', font_size=10)\n", + " \n", + " # NEW: Subplot 3: Prediction Error\n", + " pl.subplot(0, 2)\n", + " pl.add_mesh(error_glyphs, scalars='error_magnitudes', show_scalar_bar=True, lighting=False, cmap='Reds', scalar_bar_args=sargs_error)\n", + " pl.add_text(f\"Prediction Error - Time: {i}\", position='upper_edge', font_size=10)\n", + " \n", + " # Write the combined frame to the GIF\n", + " pl.write_frame()\n", + " \n", + " print(f\"Processing frame for time step {i}\", end='\\r')\n", + "\n", + "# Close the GIF writer\n", + "pl.close()\n", + "print(f\"\\nGIF saved to: {gif_path_3}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "graph_env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.18" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/cleanroom/surrogateAI/__pycache__/clean_eval.cpython-310.pyc b/cleanroom/surrogateAI/__pycache__/clean_eval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fd6113e682763673a3dd5560533583d4ad7165f Binary files /dev/null and b/cleanroom/surrogateAI/__pycache__/clean_eval.cpython-310.pyc differ diff --git a/cleanroom/surrogateAI/clean_eval.py b/cleanroom/surrogateAI/clean_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..6322070c16bf19ccf8114a8e4bd012bdff61878d --- /dev/null +++ b/cleanroom/surrogateAI/clean_eval.py @@ -0,0 +1,105 @@ +import torch +import torch.nn as nn + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +def _rollout(model, initial_state, num_steps, norm_stats): + """ + Performs a multi-step rollout simulation using the trained model. + The model's output from one step is used as the input for the next. + Applies normalization to inputs and de-normalization to outputs. 
+ """ + # --- UNPACK NORMALIZATION STATS --- + coords_mean = norm_stats['coords_mean'].to(device) + coords_std = norm_stats['coords_std'].to(device) + vel_mean = norm_stats['vel_mean'].to(device) + vel_std = norm_stats['vel_std'].to(device) + inlet_vel_mean = norm_stats['inlet_vel_mean'].to(device) + inlet_vel_std = norm_stats['inlet_vel_std'].to(device) + epsilon = 1e-7 + + predicted_trajectory = [] + + current_velocities = initial_state['current_velocities'].float().to(device) + coordinates = initial_state['coordinates'].float().to(device) + node_type = initial_state['node_type'].float().to(device) + + for step in range(num_steps): + raw_vel = initial_state['meta_info']['velocity'] + if torch.is_tensor(raw_vel): + inlet_vel = raw_vel.clone().detach().float().to(device) + else: + inlet_vel = torch.tensor(raw_vel, dtype=torch.float32).to(device) + + if inlet_vel.dim() > 1: + inlet_vel = inlet_vel.flatten()[0] + elif inlet_vel.dim() == 1 and inlet_vel.numel() == 1: + inlet_vel = inlet_vel.item() + + if isinstance(inlet_vel, torch.Tensor): + norm_inlet_scalar = (inlet_vel - inlet_vel_mean) / (inlet_vel_std + epsilon) + else: + norm_inlet_scalar = (inlet_vel - inlet_vel_mean.item()) / (inlet_vel_std.item() + epsilon) + + num_nodes = coordinates.shape[0] + + if isinstance(norm_inlet_scalar, torch.Tensor): + norm_inlet_vel = norm_inlet_scalar.expand(num_nodes).float() + else: + norm_inlet_vel = torch.full((num_nodes,), float(norm_inlet_scalar), dtype=torch.float32, device=device) + + norm_coords = (coordinates - coords_mean) / (coords_std + epsilon) + norm_current_vel = (current_velocities - vel_mean) / (vel_std + epsilon) + + norm_coords_batch = norm_coords.unsqueeze(0) + node_type_batch = node_type.unsqueeze(0) + norm_current_vel_batch = norm_current_vel.unsqueeze(0) + norm_inlet_vel_feature_batch = norm_inlet_vel.unsqueeze(0).unsqueeze(-1) + + final_input = torch.cat([ + norm_coords_batch, + node_type_batch, + norm_current_vel_batch, + norm_inlet_vel_feature_batch + ], dim=-1) + + with torch.no_grad(): + predicted_normalized_velocities = model(final_input).squeeze(0) + + predicted_next_velocities = (predicted_normalized_velocities * (vel_std + epsilon)) + vel_mean + + predicted_trajectory.append(predicted_next_velocities) + + current_velocities = predicted_next_velocities + + return torch.stack(predicted_trajectory, dim=0) + +def evaluate(model, trajectory_data, norm_stats): + """ + Takes a full validation trajectory, normalizes it, and generates a model rollout prediction. + + Args: + model: The trained Transolver model. + trajectory_data: A dictionary containing the full, grouped validation trajectory. + norm_stats: A dictionary with the mean and std of the training data. 
+ """ + num_steps = trajectory_data['current_velocities'].shape[0] + + initial_state = { + 'coordinates': trajectory_data['coordinates'].float(), + 'node_type': trajectory_data['node_type'].float(), + 'current_velocities': trajectory_data['current_velocities'][0].float(), # First step + 'meta_info': trajectory_data['meta_info'] + } + + predicted_trajectory = _rollout(model, initial_state, num_steps, norm_stats) + + results = { + 'ground_truth_trajectory': trajectory_data['target_velocities'].float(), + 'predicted_trajectory': predicted_trajectory, + 'coordinates': trajectory_data['coordinates'].float(), + 'node_type': trajectory_data['node_type'].float(), + 'meta_info': trajectory_data['meta_info'] + } + + return None, results \ No newline at end of file diff --git a/cleanroom/surrogateAI/opening.py b/cleanroom/surrogateAI/opening.py new file mode 100644 index 0000000000000000000000000000000000000000..b54c7093e49f774579696a8281ed5c612afc145e --- /dev/null +++ b/cleanroom/surrogateAI/opening.py @@ -0,0 +1,6 @@ +import numpy as np +file_path = '/home/gd_user1/AnK/project_PINN/cleanroom/second_output/rollout_results_epoch_130.npy' +data = np.load(file_path,allow_pickle=True) +print(data[0]['meta_info']['velocity']) +print(data[0]['ground_truth_trajectory'][1]) +print(data[0]['predicted_trajectory'][1]) \ No newline at end of file diff --git a/cleanroom/surrogateAI/second_train.py b/cleanroom/surrogateAI/second_train.py new file mode 100644 index 0000000000000000000000000000000000000000..a5160b7ac81156a99ba02764d534a22df19b4085 --- /dev/null +++ b/cleanroom/surrogateAI/second_train.py @@ -0,0 +1,551 @@ +import os +import torch +import torch.nn as nn +from torch.utils.data import Dataset, DataLoader +from timm.models.layers import trunc_normal_ +from einops import rearrange +import numpy as np +from typing import List, Dict, Any +import matplotlib.pyplot as plt +import json +import random +import time +from clean_eval import evaluate as rollout_evaluate + +# ============================================================================ +# 1. PYTORCH DATASET CLASS FOR TIME-STEPPED DATA +# ============================================================================ +class TimeStepDataset(Dataset): + """Dataset for flattened timestep training data""" + + def __init__(self, pt_file_path: str): + if not os.path.exists(pt_file_path): + raise FileNotFoundError(f"Dataset file not found at: {pt_file_path}") + self.data = torch.load(pt_file_path, weights_only=False) + print(f"Loaded {len(self.data)} timesteps from {pt_file_path}") + + def __len__(self) -> int: + return len(self.data) + + def __getitem__(self, idx: int) -> Dict[str, Any]: + sample = self.data[idx] + return { + 'coordinates': sample['coordinates'], + 'node_type': sample['node_type'], + 'current_velocities': sample['current_velocities'], + 'target_velocities': sample['target_velocities'], + 'meta_info': sample['meta_info'] + } + +class TrajectoryDataset(Dataset): + """Dataset for full trajectory evaluation data""" + + def __init__(self, pt_file_path: str): + if not os.path.exists(pt_file_path): + raise FileNotFoundError(f"Dataset file not found at: {pt_file_path}") + self.trajectories = torch.load(pt_file_path, weights_only=False) + print(f"Loaded {len(self.trajectories)} trajectories from {pt_file_path}") + + def __len__(self) -> int: + return len(self.trajectories) + + def __getitem__(self, idx: int) -> Dict[str, Any]: + return self.trajectories[idx] + +# ============================================================================ +# 2. 
TRANSOLVER MODEL ARCHITECTURE (Adapted for Time-Dependent Prediction) +# ============================================================================ +ACTIVATION = {'gelu': nn.GELU, 'tanh': nn.Tanh, 'sigmoid': nn.Sigmoid, 'relu': nn.ReLU, 'leaky_relu': nn.LeakyReLU(0.1)} + +class MLP(nn.Module): + def __init__(self, n_input, n_hidden, n_output, n_layers=1, act='gelu', res=True): + super(MLP, self).__init__() + if act in ACTIVATION.keys(): + act = ACTIVATION[act] + else: + raise NotImplementedError + self.linear_pre = nn.Sequential(nn.Linear(n_input, n_hidden), act()) + self.linear_post = nn.Linear(n_hidden, n_output) + self.linears = nn.ModuleList([nn.Sequential(nn.Linear(n_hidden, n_hidden), act()) for _ in range(n_layers)]) + self.res = res + + def forward(self, x): + x = self.linear_pre(x) + for i in range(len(self.linears)): + if self.res: + x = self.linears[i](x) + x + else: + x = self.linears[i](x) + x = self.linear_post(x) + return x + +class Physics_Attention_Irregular_Mesh(nn.Module): + def __init__(self, dim, heads=8, dim_head=64, dropout=0., slice_num=64): + super().__init__() + inner_dim = dim_head * heads + self.dim_head = dim_head + self.heads = heads + self.scale = dim_head ** -0.5 + self.softmax = nn.Softmax(dim=-1) + self.dropout = nn.Dropout(dropout) + self.temperature = nn.Parameter(torch.ones([1, heads, 1, 1]) * 0.5) + self.in_project_x = nn.Linear(dim, inner_dim) + self.in_project_fx = nn.Linear(dim, inner_dim) + self.in_project_slice = nn.Linear(dim_head, slice_num) + torch.nn.init.orthogonal_(self.in_project_slice.weight) + self.to_q = nn.Linear(dim_head, dim_head, bias=False) + self.to_k = nn.Linear(dim_head, dim_head, bias=False) + self.to_v = nn.Linear(dim_head, dim_head, bias=False) + self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout)) + + def forward(self, x): + B, N, C = x.shape + fx_mid = self.in_project_fx(x).reshape(B, N, self.heads, self.dim_head).permute(0, 2, 1, 3).contiguous() + x_mid = self.in_project_x(x).reshape(B, N, self.heads, self.dim_head).permute(0, 2, 1, 3).contiguous() + slice_weights = self.softmax(self.in_project_slice(x_mid) / self.temperature) + slice_norm = slice_weights.sum(2) + slice_token = torch.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights) + slice_token = slice_token / ((slice_norm + 1e-5)[:, :, :, None].repeat(1, 1, 1, self.dim_head)) + q_slice_token = self.to_q(slice_token) + k_slice_token = self.to_k(slice_token) + v_slice_token = self.to_v(slice_token) + dots = torch.matmul(q_slice_token, k_slice_token.transpose(-1, -2)) * self.scale + attn = self.softmax(dots) + attn = self.dropout(attn) + out_slice_token = torch.matmul(attn, v_slice_token) + out_x = torch.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights) + out_x = rearrange(out_x, 'b h n d -> b n (h d)') + out = self.to_out(out_x) + return out + +class Transolver_block(nn.Module): + def __init__(self, num_heads, hidden_dim, dropout, act='gelu', mlp_ratio=4, last_layer=False, out_dim=1, slice_num=32): + super().__init__() + self.last_layer = last_layer + self.ln_1 = nn.LayerNorm(hidden_dim) + self.Attn = Physics_Attention_Irregular_Mesh(hidden_dim, heads=num_heads, dim_head=hidden_dim // num_heads, dropout=dropout, slice_num=slice_num) + self.ln_2 = nn.LayerNorm(hidden_dim) + self.mlp = MLP(hidden_dim, hidden_dim * mlp_ratio, hidden_dim, n_layers=0, res=False, act=act) + if self.last_layer: + self.ln_3 = nn.LayerNorm(hidden_dim) + self.mlp2 = nn.Linear(hidden_dim, out_dim) + + def forward(self, fx): + fx2 = self.Attn(self.ln_1(fx)) + fx + fx = 
self.mlp(self.ln_2(fx2)) + fx2 + if self.last_layer: + return self.mlp2(self.ln_3(fx)) + return fx + +class Model(nn.Module): + def __init__(self, in_dim=13, out_dim=3, n_layers=8, n_hidden=256, dropout=0, n_head=8, act='gelu', mlp_ratio=2, slice_num=32): + super(Model, self).__init__() + self.preprocess = MLP(in_dim, n_hidden * 2, n_hidden, n_layers=0, res=False, act=act) + self.n_hidden = n_hidden + self.blocks = nn.ModuleList([Transolver_block(num_heads=n_head, hidden_dim=n_hidden, dropout=dropout, + act=act, mlp_ratio=mlp_ratio, + out_dim=out_dim, + slice_num=slice_num, + last_layer=(_ == n_layers - 1)) + for _ in range(n_layers)]) + self.initialize_weights() + self.placeholder = nn.Parameter((1 / n_hidden) * torch.rand(n_hidden, dtype=torch.float)) + + def initialize_weights(self): + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d)): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + fx = self.preprocess(x) + fx = fx + self.placeholder[None, None, :] + for block in self.blocks: + fx = block(fx) + return fx + +# ============================================================================ +# 3. CHECKPOINTING FUNCTIONS +# ============================================================================ +def save_checkpoint(model, optimizer, scheduler, epoch, loss, path, norm_stats): + """Save training checkpoint""" + checkpoint = { + 'epoch': epoch, + 'model_state_dict': model.state_dict(), + 'optimizer_state_dict': optimizer.state_dict(), + 'scheduler_state_dict': scheduler.state_dict() if scheduler else None, + 'loss': loss, + 'norm_stats': norm_stats # Save normalization stats + } + torch.save(checkpoint, path) + print(f"Checkpoint saved: {path}") + +def load_checkpoint(model, optimizer, scheduler, path): + """Load training checkpoint""" + checkpoint = torch.load(path, map_location='cpu') + model.load_state_dict(checkpoint['model_state_dict']) + optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + if scheduler and checkpoint.get('scheduler_state_dict'): + scheduler.load_state_dict(checkpoint['scheduler_state_dict']) + + # Load norm_stats if they exist, otherwise return None + norm_stats = checkpoint.get('norm_stats', None) + if norm_stats is None: + print("Warning: Normalization stats not found in checkpoint.") + + return checkpoint['epoch'], checkpoint['loss'], norm_stats + +# ============================================================================ +# 4. 
EVALUATION FUNCTIONS +# ============================================================================ +def create_wall_mask(node_type): + """Create mask to ignore wall nodes during loss calculation""" + wall_class_index = 5 # Assuming walls are class 5 + node_class_indices = torch.argmax(node_type, dim=-1) + mask = (node_class_indices != wall_class_index).float().unsqueeze(-1) + return mask + +def evaluate_rollout(model, trajectory_loader, device, norm_stats): + """Run rollout evaluation on full trajectories""" + model.eval() + results = [] + + print(f"Starting rollout evaluation on {len(trajectory_loader)} trajectories...") + + with torch.no_grad(): + for i, trajectory in enumerate(trajectory_loader): + try: + print(f"\nProcessing trajectory {i+1}/{len(trajectory_loader)}") + + # Since trajectory_loader has batch_size=1, we need to extract the single trajectory + single_trajectory = {} + for key, value in trajectory.items(): + if key != 'meta_info': + # Remove the batch dimension + single_trajectory[key] = value.squeeze(0) + else: + # meta_info is special - it's a dict where each value is a list/tensor due to batching + # We need to extract the first element from each value in the meta_info dict + single_trajectory[key] = {} + for meta_key, meta_value in value.items(): + if isinstance(meta_value, (list, tuple)): + single_trajectory[key][meta_key] = meta_value[0] + elif isinstance(meta_value, dict): + # Handle nested dictionaries (like node_type_counts) + single_trajectory[key][meta_key] = {} + for nested_key, nested_value in meta_value.items(): + if isinstance(nested_value, (list, tuple)): + single_trajectory[key][meta_key][nested_key] = nested_value[0] + elif torch.is_tensor(nested_value) and nested_value.numel() > 1: + single_trajectory[key][meta_key][nested_key] = nested_value[0] + else: + single_trajectory[key][meta_key][nested_key] = nested_value + elif torch.is_tensor(meta_value) and meta_value.numel() > 1: + # For tensors with more than one element, take the first element + single_trajectory[key][meta_key] = meta_value[0] + else: + # For scalars or other types, keep as is + single_trajectory[key][meta_key] = meta_value + + # Pass the normalization statistics to the evaluation function + _, traj_result = rollout_evaluate(model, single_trajectory, norm_stats) + results.append(traj_result) + print(f"Successfully processed trajectory {i+1}") + + except Exception as e: + print(f"Error processing trajectory {i+1}: {str(e)}") + print(f"Trajectory data shapes:") + for key, value in trajectory.items(): + if hasattr(value, 'shape'): + print(f" {key}: {value.shape}") + else: + print(f" {key}: {type(value)} - {value}") + raise e + + print(f"Completed rollout evaluation on {len(results)} trajectories") + return results +# ============================================================================ +# 5. 
MAIN TRAINING SCRIPT +# ============================================================================ +def main(): + script_start_time = time.time() + # --- CONFIGURATION --- + preprocessed_dir = r"/home/gd_user1/AnK/project_PINN/cleanroom/rawdataset" + base_filename = "final_data_timestep_data" + output_dir = r"/home/gd_user1/AnK/project_PINN/cleanroom/second_output/output_2" + + train_pt_path = os.path.join(preprocessed_dir, f"{base_filename}_train.pt") + val_pt_path = os.path.join(preprocessed_dir, f"{base_filename}_test.pt") + + os.makedirs(output_dir, exist_ok=True) + + hparams = { + 'lr': 0.0005, 'batch_size': 1, 'nb_epochs': 400, + 'in_dim': 13, # coordinates(3) + node_type(6) + current_vel(3) + inlet_vel(1) + 'out_dim': 3, 'n_hidden': 256, 'n_layers': 6, 'n_head': 8, 'slice_num': 32, + 'checkpoint_freq': 10, 'eval_freq': 10 + } + + # --- SETUP --- + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + print(f"Using device: {device}") + if torch.cuda.is_available(): + print(f"GPU: {torch.cuda.get_device_name(0)}") + print(f"CUDA version: {torch.version.cuda}") + + # Set random seeds for reproducibility + torch.manual_seed(42) + random.seed(42) + np.random.seed(42) + + # --- DATALOADERS --- + print("Loading datasets...") + train_dataset = TimeStepDataset(train_pt_path) + val_timestep_dataset = TimeStepDataset(val_pt_path) # For single-step validation + test_trajectory_dataset = TrajectoryDataset(val_pt_path) # For rollout evaluation + + train_loader = DataLoader(train_dataset, batch_size=hparams['batch_size'], shuffle=True) + val_timestep_loader = DataLoader(val_timestep_dataset, batch_size=hparams['batch_size'], shuffle=False) + test_trajectory_loader = DataLoader(test_trajectory_dataset, batch_size=1, shuffle=False) + + # --- COMPUTE NORMALIZATION STATISTICS FROM TRAINING DATA --- + def compute_normalization_stats(train_dataset): + """Compute normalization statistics from training dataset""" + coords_list = [] + vel_list = [] + inlet_vel_list = [] + + print("Computing normalization statistics from training data...") + + for sample in train_dataset: + coords_list.append(sample['coordinates']) + vel_list.append(sample['current_velocities']) + # Handle inlet velocity (it's in meta_info and may be tensor or scalar) + inlet_vel = sample['meta_info']['velocity'] + if torch.is_tensor(inlet_vel): + inlet_vel_list.append(inlet_vel.item()) + else: + inlet_vel_list.append(float(inlet_vel)) + + # Stack and compute statistics + coords_all = torch.stack(coords_list) # [num_samples, num_nodes, 3] + vel_all = torch.stack(vel_list) # [num_samples, num_nodes, 3] + inlet_vel_all = torch.tensor(inlet_vel_list) # [num_samples] + + stats = { + 'coords_mean': coords_all.mean(dim=[0, 1]), # Mean across samples and nodes + 'coords_std': coords_all.std(dim=[0, 1]), + 'vel_mean': vel_all.mean(dim=[0, 1]), + 'vel_std': vel_all.std(dim=[0, 1]), + 'inlet_vel_mean': inlet_vel_all.mean(), + 'inlet_vel_std': inlet_vel_all.std() + } + + print(f"Computed stats - Coords mean: {stats['coords_mean']}, std: {stats['coords_std']}") + print(f"Velocities mean: {stats['vel_mean']}, std: {stats['vel_std']}") + print(f"Inlet vel mean: {stats['inlet_vel_mean']:.6f}, std: {stats['inlet_vel_std']:.6f}") + + return stats + + # Compute normalization statistics + norm_stats = compute_normalization_stats(train_dataset) + coords_mean = norm_stats['coords_mean'].to(device) + coords_std = norm_stats['coords_std'].to(device) + vel_mean = norm_stats['vel_mean'].to(device) + vel_std = norm_stats['vel_std'].to(device) + 
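+    # These statistics broadcast later: coords_* and vel_* are per-component 3-vectors and the
+    # inlet_vel_* values are scalars, all moved onto the training device once up front.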
inlet_vel_mean = norm_stats['inlet_vel_mean'].to(device) + inlet_vel_std = norm_stats['inlet_vel_std'].to(device) + epsilon = 1e-8 # Small constant to prevent division by zero + + # --- MODEL, OPTIMIZER, SCHEDULER SETUP --- + model = Model(in_dim=hparams['in_dim'], out_dim=hparams['out_dim'], + n_hidden=hparams['n_hidden'], n_layers=hparams['n_layers'], + n_head=hparams['n_head'], slice_num=hparams['slice_num']).to(device) + + optimizer = torch.optim.Adam(model.parameters(), lr=hparams['lr']) + scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.9) + criterion = nn.MSELoss() + + # Initialize training variables + start_epoch = 0 + best_val_loss = float('inf') + + # Check for existing checkpoint to resume training + latest_checkpoint_path = os.path.join(output_dir, 'checkpoint_latest.pth') + if os.path.exists(latest_checkpoint_path): + print(f"Found existing checkpoint: {latest_checkpoint_path}") + try: + start_epoch, last_loss, loaded_norm_stats = load_checkpoint( + model, optimizer, scheduler, latest_checkpoint_path + ) + if loaded_norm_stats is not None: + norm_stats = loaded_norm_stats + # Update the normalization tensors + coords_mean = norm_stats['coords_mean'].to(device) + coords_std = norm_stats['coords_std'].to(device) + vel_mean = norm_stats['vel_mean'].to(device) + vel_std = norm_stats['vel_std'].to(device) + inlet_vel_mean = norm_stats['inlet_vel_mean'].to(device) + inlet_vel_std = norm_stats['inlet_vel_std'].to(device) + print("Loaded normalization statistics from checkpoint.") + print(f"Resuming training from epoch {start_epoch}") + best_val_loss = last_loss + except Exception as e: + print(f"Error loading checkpoint: {e}") + print("Starting fresh training...") + start_epoch = 0 + best_val_loss = float('inf') + + print(f"Starting training from epoch {start_epoch} to {hparams['nb_epochs']}") + print("="*60) + + train_loss_history = [] + val_loss_history = [] + lr_history = [] + val_epochs = [] + + # --- TRAINING LOOP --- + for epoch in range(start_epoch, hparams['nb_epochs']): + epoch_start_time = time.time() + model.train() + total_train_loss = 0.0 + + for batch_idx, batch in enumerate(train_loader): + coordinates = batch['coordinates'].to(device) + node_type = batch['node_type'].to(device) + current_velocities = batch['current_velocities'].to(device) + target_velocities = batch['target_velocities'].to(device) + + inlet_velocities = batch['meta_info']['velocity'] + if torch.is_tensor(inlet_velocities): + inlet_vel_tensor = inlet_velocities.clone().detach().float().to(device) + else: + inlet_vel_tensor = torch.tensor(inlet_velocities, dtype=torch.float32).to(device) + + # --- NORMALIZE FEATURES --- + norm_coords = (coordinates - coords_mean) / (coords_std + epsilon) + norm_current_vel = (current_velocities - vel_mean) / (vel_std + epsilon) + norm_target_vel = (target_velocities - vel_mean) / (vel_std + epsilon) + norm_inlet_vel = (inlet_vel_tensor - inlet_vel_mean) / (inlet_vel_std + epsilon) + + batch_size, num_nodes, _ = coordinates.shape + inlet_vel_feature = norm_inlet_vel.view(batch_size, 1, 1).expand(batch_size, num_nodes, 1) + + # Input: [normalized coordinates, node_type, normalized velocities, normalized inlet_velocity] + input_features = torch.cat([norm_coords, node_type, norm_current_vel, inlet_vel_feature], dim=-1) + + optimizer.zero_grad() + predicted_normalized_velocities = model(input_features) + + # Apply wall masking + mask = create_wall_mask(node_type) + loss = criterion(predicted_normalized_velocities * mask, norm_target_vel * mask) + 
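+            # Masked MSE: wall-node rows are zeroed in both prediction and target, so they add no
+            # error. Note that nn.MSELoss with its default 'mean' reduction still divides by the
+            # total element count, masked entries included.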
loss.backward() + optimizer.step() + + total_train_loss += loss.item() + + # Progress update every 50 batches + if batch_idx % 50 == 0: + print(f"Epoch {epoch+1}/{hparams['nb_epochs']} | Batch {batch_idx+1}/{len(train_loader)} | Loss: {loss.item():.6f}") + + scheduler.step() + lr_history.append(optimizer.param_groups[0]['lr']) + epoch_time = time.time() - epoch_start_time + avg_train_loss = total_train_loss / len(train_loader) + train_loss_history.append(avg_train_loss) + + print(f"Epoch {epoch+1}/{hparams['nb_epochs']} completed | Avg Train Loss: {avg_train_loss:.6f} | Time: {epoch_time:.2f}s") + + # --- PERIODIC EVALUATION --- + if (epoch + 1) % hparams['eval_freq'] == 0 or epoch == hparams['nb_epochs'] - 1: + print(f"Epoch {epoch+1}/{hparams['nb_epochs']} | Train Loss: {avg_train_loss:.6f}") + + # Rollout evaluation + print(" Running rollout evaluation...") + rollout_results = evaluate_rollout(model, test_trajectory_loader, device, norm_stats) + + # Calculate loss from rollout + total_rollout_mse = 0.0 + for result in rollout_results: + # Move tensors to the correct device + pred_traj = result['predicted_trajectory'].to(device) + gt_traj = result['ground_truth_trajectory'].to(device) + node_type = result['node_type'].to(device) + + # Create a mask to ignore wall nodes + mask = create_wall_mask(node_type).unsqueeze(0) # Add time dim for broadcasting + + # Calculate masked MSE + loss = criterion(pred_traj * mask, gt_traj * mask) + total_rollout_mse += loss.item() + + avg_rollout_mse = total_rollout_mse / len(rollout_results) + + val_loss = avg_rollout_mse + val_loss_history.append(val_loss) + val_epochs.append(epoch + 1) + print(f" Validation Rollout MSE: {val_loss:.6f}") + + # Save rollout results + rollout_path = os.path.join(output_dir, f"rollout_results_epoch_{epoch+1}.npy") + np.save(rollout_path, rollout_results, allow_pickle=True) # allow_pickle is good practice for np.save with dicts + print(f" -> Saved rollout results to {rollout_path}") + + # Save best model based on rollout loss + if val_loss < best_val_loss: + best_val_loss = val_loss + best_model_path = os.path.join(output_dir, 'best_model.pth') + torch.save(model.state_dict(), best_model_path) + print(f" -> New best model saved with validation loss: {best_val_loss:.6f}") + + # --- PERIODIC CHECKPOINTING --- + if (epoch + 1) % hparams['checkpoint_freq'] == 0: + # Use validation loss if available, otherwise use training loss + checkpoint_loss = val_loss if 'val_loss' in locals() else avg_train_loss + checkpoint_path = os.path.join(output_dir, f'checkpoint_epoch_{epoch+1}.pth') + save_checkpoint(model, optimizer, scheduler, epoch + 1, checkpoint_loss, checkpoint_path, norm_stats) + + # --- SAVE LATEST CHECKPOINT --- + save_checkpoint(model, optimizer, scheduler, hparams['nb_epochs'], best_val_loss, + os.path.join(output_dir, 'checkpoint_latest.pth'), norm_stats) + + print("="*60) + print("Training complete.") + print(f"Best validation loss: {best_val_loss:.6f}") + + # --- PLOTTING AND SAVING LOSS HISTORY --- + plt.figure(figsize=(10, 5)) + plt.plot(train_loss_history, label='Training Loss') + plt.plot(val_epochs, val_loss_history, 'o-', label='Validation Loss') + plt.title('Training and Validation Loss Over Epochs') + plt.xlabel('Epoch') + plt.ylabel('Loss (MSE)') + plt.legend() + plt.grid(True) + plt.savefig(os.path.join(output_dir, 'training_loss_plot.png')) + print(f"Loss plot saved to {os.path.join(output_dir, 'training_loss_plot.png')}") + + loss_data = { + 'train_loss_history': train_loss_history, + 'val_loss_history': 
val_loss_history, + 'val_epochs': val_epochs, + 'lr_history': lr_history, + 'best_val_loss': best_val_loss + } + with open(os.path.join(output_dir, 'loss_history.json'), 'w') as f: + json.dump(loss_data, f, indent=4) + print(f"Loss history saved to {os.path.join(output_dir, 'loss_history.json')}") + + script_end_time = time.time() + elapsed_time_seconds = script_end_time - script_start_time + print("="*60) + print(f"Total script execution time: {elapsed_time_seconds / 60:.2f} minutes ({elapsed_time_seconds:.2f} seconds)") +if __name__ == '__main__': + main() diff --git a/cleanroom/surrogateAI/time_series_train.py b/cleanroom/surrogateAI/time_series_train.py new file mode 100644 index 0000000000000000000000000000000000000000..3e4fdbf572bff5d01fcbcebf0f95fa0e937db3fb --- /dev/null +++ b/cleanroom/surrogateAI/time_series_train.py @@ -0,0 +1,556 @@ +import os +import torch +import torch.nn as nn +from torch.utils.data import Dataset, DataLoader +from timm.models.layers import trunc_normal_ +from einops import rearrange +import numpy as np +from typing import List, Dict, Any +import matplotlib.pyplot as plt +import json +import random +import time +from clean_eval import evaluate as rollout_evaluate + +# ============================================================================ +# 1. PYTORCH DATASET CLASS FOR TIME-STEPPED DATA +# ============================================================================ +class TimeStepDataset(Dataset): + """Dataset for flattened timestep training data""" + + def __init__(self, pt_file_path: str): + if not os.path.exists(pt_file_path): + raise FileNotFoundError(f"Dataset file not found at: {pt_file_path}") + self.data = torch.load(pt_file_path, weights_only=False) + print(f"Loaded {len(self.data)} timesteps from {pt_file_path}") + + def __len__(self) -> int: + return len(self.data) + + def __getitem__(self, idx: int) -> Dict[str, Any]: + sample = self.data[idx] + return { + 'coordinates': sample['coordinates'], + 'node_type': sample['node_type'], + 'current_velocities': sample['current_velocities'], + 'target_velocities': sample['target_velocities'], + 'meta_info': sample['meta_info'] + } + +class TrajectoryDataset(Dataset): + """Dataset for full trajectory evaluation data""" + + def __init__(self, pt_file_path: str): + if not os.path.exists(pt_file_path): + raise FileNotFoundError(f"Dataset file not found at: {pt_file_path}") + self.trajectories = torch.load(pt_file_path, weights_only=False) + print(f"Loaded {len(self.trajectories)} trajectories from {pt_file_path}") + + def __len__(self) -> int: + return len(self.trajectories) + + def __getitem__(self, idx: int) -> Dict[str, Any]: + return self.trajectories[idx] + +# ============================================================================ +# 2. 
TRANSOLVER MODEL ARCHITECTURE (Adapted for Time-Dependent Prediction) +# ============================================================================ +ACTIVATION = {'gelu': nn.GELU, 'tanh': nn.Tanh, 'sigmoid': nn.Sigmoid, 'relu': nn.ReLU, 'leaky_relu': nn.LeakyReLU(0.1)} + +class MLP(nn.Module): + def __init__(self, n_input, n_hidden, n_output, n_layers=1, act='gelu', res=True): + super(MLP, self).__init__() + if act in ACTIVATION.keys(): + act = ACTIVATION[act] + else: + raise NotImplementedError + self.linear_pre = nn.Sequential(nn.Linear(n_input, n_hidden), act()) + self.linear_post = nn.Linear(n_hidden, n_output) + self.linears = nn.ModuleList([nn.Sequential(nn.Linear(n_hidden, n_hidden), act()) for _ in range(n_layers)]) + self.res = res + + def forward(self, x): + x = self.linear_pre(x) + for i in range(len(self.linears)): + if self.res: + x = self.linears[i](x) + x + else: + x = self.linears[i](x) + x = self.linear_post(x) + return x + +class Physics_Attention_Irregular_Mesh(nn.Module): + def __init__(self, dim, heads=8, dim_head=64, dropout=0., slice_num=64): + super().__init__() + inner_dim = dim_head * heads + self.dim_head = dim_head + self.heads = heads + self.scale = dim_head ** -0.5 + self.softmax = nn.Softmax(dim=-1) + self.dropout = nn.Dropout(dropout) + self.temperature = nn.Parameter(torch.ones([1, heads, 1, 1]) * 0.5) + self.in_project_x = nn.Linear(dim, inner_dim) + self.in_project_fx = nn.Linear(dim, inner_dim) + self.in_project_slice = nn.Linear(dim_head, slice_num) + torch.nn.init.orthogonal_(self.in_project_slice.weight) + self.to_q = nn.Linear(dim_head, dim_head, bias=False) + self.to_k = nn.Linear(dim_head, dim_head, bias=False) + self.to_v = nn.Linear(dim_head, dim_head, bias=False) + self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout)) + + def forward(self, x): + B, N, C = x.shape + fx_mid = self.in_project_fx(x).reshape(B, N, self.heads, self.dim_head).permute(0, 2, 1, 3).contiguous() + x_mid = self.in_project_x(x).reshape(B, N, self.heads, self.dim_head).permute(0, 2, 1, 3).contiguous() + slice_weights = self.softmax(self.in_project_slice(x_mid) / self.temperature) + slice_norm = slice_weights.sum(2) + slice_token = torch.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights) + slice_token = slice_token / ((slice_norm + 1e-5)[:, :, :, None].repeat(1, 1, 1, self.dim_head)) + q_slice_token = self.to_q(slice_token) + k_slice_token = self.to_k(slice_token) + v_slice_token = self.to_v(slice_token) + dots = torch.matmul(q_slice_token, k_slice_token.transpose(-1, -2)) * self.scale + attn = self.softmax(dots) + attn = self.dropout(attn) + out_slice_token = torch.matmul(attn, v_slice_token) + out_x = torch.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights) + out_x = rearrange(out_x, 'b h n d -> b n (h d)') + out = self.to_out(out_x) + return out + +class Transolver_block(nn.Module): + def __init__(self, num_heads, hidden_dim, dropout, act='gelu', mlp_ratio=4, last_layer=False, out_dim=1, slice_num=32): + super().__init__() + self.last_layer = last_layer + self.ln_1 = nn.LayerNorm(hidden_dim) + self.Attn = Physics_Attention_Irregular_Mesh(hidden_dim, heads=num_heads, dim_head=hidden_dim // num_heads, dropout=dropout, slice_num=slice_num) + self.ln_2 = nn.LayerNorm(hidden_dim) + self.mlp = MLP(hidden_dim, hidden_dim * mlp_ratio, hidden_dim, n_layers=0, res=False, act=act) + if self.last_layer: + self.ln_3 = nn.LayerNorm(hidden_dim) + self.mlp2 = nn.Linear(hidden_dim, out_dim) + + def forward(self, fx): + fx2 = self.Attn(self.ln_1(fx)) + fx + fx = 
self.mlp(self.ln_2(fx2)) + fx2 + if self.last_layer: + return self.mlp2(self.ln_3(fx)) + return fx + +class Model(nn.Module): + def __init__(self, in_dim=13, out_dim=3, n_layers=8, n_hidden=256, dropout=0, n_head=8, act='gelu', mlp_ratio=2, slice_num=32): + super(Model, self).__init__() + self.preprocess = MLP(in_dim, n_hidden * 2, n_hidden, n_layers=0, res=False, act=act) + self.n_hidden = n_hidden + self.blocks = nn.ModuleList([Transolver_block(num_heads=n_head, hidden_dim=n_hidden, dropout=dropout, + act=act, mlp_ratio=mlp_ratio, + out_dim=out_dim, + slice_num=slice_num, + last_layer=(_ == n_layers - 1)) + for _ in range(n_layers)]) + self.initialize_weights() + self.placeholder = nn.Parameter((1 / n_hidden) * torch.rand(n_hidden, dtype=torch.float)) + + def initialize_weights(self): + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, (nn.LayerNorm, nn.BatchNorm1d)): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + fx = self.preprocess(x) + fx = fx + self.placeholder[None, None, :] + for block in self.blocks: + fx = block(fx) + return fx + +# ============================================================================ +# 3. CHECKPOINTING FUNCTIONS +# ============================================================================ +def save_checkpoint(model, optimizer, scheduler, epoch, loss, path, norm_stats): + """Save training checkpoint""" + checkpoint = { + 'epoch': epoch, + 'model_state_dict': model.state_dict(), + 'optimizer_state_dict': optimizer.state_dict(), + 'scheduler_state_dict': scheduler.state_dict() if scheduler else None, + 'loss': loss, + 'norm_stats': norm_stats # Save normalization stats + } + torch.save(checkpoint, path) + print(f"Checkpoint saved: {path}") + +def load_checkpoint(model, optimizer, scheduler, path): + """Load training checkpoint""" + checkpoint = torch.load(path, map_location='cpu') + model.load_state_dict(checkpoint['model_state_dict']) + optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + if scheduler and checkpoint.get('scheduler_state_dict'): + scheduler.load_state_dict(checkpoint['scheduler_state_dict']) + + # Load norm_stats if they exist, otherwise return None + norm_stats = checkpoint.get('norm_stats', None) + if norm_stats is None: + print("Warning: Normalization stats not found in checkpoint.") + + return checkpoint['epoch'], checkpoint['loss'], norm_stats + +# ============================================================================ +# 4. 
EVALUATION FUNCTIONS +# ============================================================================ +def create_wall_mask(node_type): + """Create mask to ignore wall nodes during loss calculation""" + wall_class_index = 5 # Assuming walls are class 5 + node_class_indices = torch.argmax(node_type, dim=-1) + mask = (node_class_indices != wall_class_index).float().unsqueeze(-1) + return mask + +def evaluate_rollout(model, trajectory_loader, device, norm_stats): + """Run rollout evaluation on full trajectories""" + model.eval() + results = [] + + print(f"Starting rollout evaluation on {len(trajectory_loader)} trajectories...") + + with torch.no_grad(): + for i, trajectory in enumerate(trajectory_loader): + try: + print(f"\nProcessing trajectory {i+1}/{len(trajectory_loader)}") + + # Since trajectory_loader has batch_size=1, we need to extract the single trajectory + single_trajectory = {} + for key, value in trajectory.items(): + if key != 'meta_info': + # Remove the batch dimension + single_trajectory[key] = value.squeeze(0) + else: + # meta_info is special - it's a dict where each value is a list/tensor due to batching + # We need to extract the first element from each value in the meta_info dict + single_trajectory[key] = {} + for meta_key, meta_value in value.items(): + if isinstance(meta_value, (list, tuple)): + single_trajectory[key][meta_key] = meta_value[0] + elif isinstance(meta_value, dict): + # Handle nested dictionaries (like node_type_counts) + single_trajectory[key][meta_key] = {} + for nested_key, nested_value in meta_value.items(): + if isinstance(nested_value, (list, tuple)): + single_trajectory[key][meta_key][nested_key] = nested_value[0] + elif torch.is_tensor(nested_value) and nested_value.numel() > 1: + single_trajectory[key][meta_key][nested_key] = nested_value[0] + else: + single_trajectory[key][meta_key][nested_key] = nested_value + elif torch.is_tensor(meta_value) and meta_value.numel() > 1: + # For tensors with more than one element, take the first element + single_trajectory[key][meta_key] = meta_value[0] + else: + # For scalars or other types, keep as is + single_trajectory[key][meta_key] = meta_value + + # Pass the normalization statistics to the evaluation function + _, traj_result = rollout_evaluate(model, single_trajectory, norm_stats) + results.append(traj_result) + print(f"Successfully processed trajectory {i+1}") + + except Exception as e: + print(f"Error processing trajectory {i+1}: {str(e)}") + print(f"Trajectory data shapes:") + for key, value in trajectory.items(): + if hasattr(value, 'shape'): + print(f" {key}: {value.shape}") + else: + print(f" {key}: {type(value)} - {value}") + raise e + + print(f"Completed rollout evaluation on {len(results)} trajectories") + return results +# ============================================================================ +# 5. 
MAIN TRAINING SCRIPT +# ============================================================================ +def main(): + script_start_time = time.time() + # --- CONFIGURATION --- + preprocessed_dir = r"/home/gd_user1/AnK/project_PINN/cleanroom/rawdataset" + base_filename = "final_data_timestep_data" + output_dir = r"/home/gd_user1/AnK/project_PINN/cleanroom/outputs_800" + + train_pt_path = os.path.join(preprocessed_dir, f"{base_filename}_train.pt") + val_pt_path = os.path.join(preprocessed_dir, f"{base_filename}_test.pt") + + os.makedirs(output_dir, exist_ok=True) + + hparams = { + 'lr': 0.0005, 'batch_size': 4, 'nb_epochs': 800, + 'in_dim': 13, # coordinates(3) + node_type(6) + current_vel(3) + inlet_vel(1) + 'out_dim': 3, 'n_hidden': 256, 'n_layers': 6, 'n_head': 8, 'slice_num': 32, + 'checkpoint_freq': 10, 'eval_freq': 10 + } + + # --- SETUP --- + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + print(f"Using device: {device}") + if torch.cuda.is_available(): + print(f"GPU: {torch.cuda.get_device_name(0)}") + print(f"CUDA version: {torch.version.cuda}") + + # Set random seeds for reproducibility + torch.manual_seed(42) + random.seed(42) + np.random.seed(42) + + # --- DATALOADERS --- + print("Loading datasets...") + train_dataset = TimeStepDataset(train_pt_path) + val_timestep_dataset = TimeStepDataset(val_pt_path) # For single-step validation + test_trajectory_dataset = TrajectoryDataset(val_pt_path) # For rollout evaluation + + train_loader = DataLoader(train_dataset, batch_size=hparams['batch_size'], shuffle=True) + val_timestep_loader = DataLoader(val_timestep_dataset, batch_size=hparams['batch_size'], shuffle=False) + test_trajectory_loader = DataLoader(test_trajectory_dataset, batch_size=1, shuffle=False) + + # --- COMPUTE NORMALIZATION STATISTICS FROM TRAINING DATA --- + def compute_normalization_stats(train_dataset): + """Compute normalization statistics from training dataset""" + coords_list = [] + vel_list = [] + inlet_vel_list = [] + + print("Computing normalization statistics from training data...") + + for sample in train_dataset: + coords_list.append(sample['coordinates']) + vel_list.append(sample['current_velocities']) + # Handle inlet velocity (it's in meta_info and may be tensor or scalar) + inlet_vel = sample['meta_info']['velocity'] + if torch.is_tensor(inlet_vel): + inlet_vel_list.append(inlet_vel.item()) + else: + inlet_vel_list.append(float(inlet_vel)) + + # Stack and compute statistics + coords_all = torch.stack(coords_list) # [num_samples, num_nodes, 3] + vel_all = torch.stack(vel_list) # [num_samples, num_nodes, 3] + inlet_vel_all = torch.tensor(inlet_vel_list) # [num_samples] + + stats = { + 'coords_mean': coords_all.mean(dim=[0, 1]), # Mean across samples and nodes + 'coords_std': coords_all.std(dim=[0, 1]), + 'vel_mean': vel_all.mean(dim=[0, 1]), + 'vel_std': vel_all.std(dim=[0, 1]), + 'inlet_vel_mean': inlet_vel_all.mean(), + 'inlet_vel_std': inlet_vel_all.std() + } + + print(f"Computed stats - Coords mean: {stats['coords_mean']}, std: {stats['coords_std']}") + print(f"Velocities mean: {stats['vel_mean']}, std: {stats['vel_std']}") + print(f"Inlet vel mean: {stats['inlet_vel_mean']:.6f}, std: {stats['inlet_vel_std']:.6f}") + + return stats + + # Compute normalization statistics + norm_stats = compute_normalization_stats(train_dataset) + coords_mean = norm_stats['coords_mean'].to(device) + coords_std = norm_stats['coords_std'].to(device) + vel_mean = norm_stats['vel_mean'].to(device) + vel_std = norm_stats['vel_std'].to(device) + 
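+    # These statistics are also written into every checkpoint via save_checkpoint, so a resumed run
+    # below overrides the freshly computed values with the ones captured when training started.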
inlet_vel_mean = norm_stats['inlet_vel_mean'].to(device) + inlet_vel_std = norm_stats['inlet_vel_std'].to(device) + epsilon = 1e-8 # Small constant to prevent division by zero + + # --- MODEL, OPTIMIZER, SCHEDULER SETUP --- + model = Model(in_dim=hparams['in_dim'], out_dim=hparams['out_dim'], + n_hidden=hparams['n_hidden'], n_layers=hparams['n_layers'], + n_head=hparams['n_head'], slice_num=hparams['slice_num']).to(device) + + optimizer = torch.optim.Adam(model.parameters(), lr=hparams['lr']) + scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts( + optimizer, + T_0=50, # Number of epochs for the first restart cycle + T_mult=2, # Factor to increase T_0 after each restart (2x longer each time) + eta_min=1e-7 # Minimum learning rate +) + criterion = nn.MSELoss() + + # Initialize training variables + start_epoch = 0 + best_val_loss = float('inf') + + # Check for existing checkpoint to resume training + latest_checkpoint_path = os.path.join(output_dir, 'checkpoint_latest.pth') + if os.path.exists(latest_checkpoint_path): + print(f"Found existing checkpoint: {latest_checkpoint_path}") + try: + start_epoch, last_loss, loaded_norm_stats = load_checkpoint( + model, optimizer, scheduler, latest_checkpoint_path + ) + if loaded_norm_stats is not None: + norm_stats = loaded_norm_stats + # Update the normalization tensors + coords_mean = norm_stats['coords_mean'].to(device) + coords_std = norm_stats['coords_std'].to(device) + vel_mean = norm_stats['vel_mean'].to(device) + vel_std = norm_stats['vel_std'].to(device) + inlet_vel_mean = norm_stats['inlet_vel_mean'].to(device) + inlet_vel_std = norm_stats['inlet_vel_std'].to(device) + print("Loaded normalization statistics from checkpoint.") + print(f"Resuming training from epoch {start_epoch}") + best_val_loss = last_loss + except Exception as e: + print(f"Error loading checkpoint: {e}") + print("Starting fresh training...") + start_epoch = 0 + best_val_loss = float('inf') + + print(f"Starting training from epoch {start_epoch} to {hparams['nb_epochs']}") + print("="*60) + + train_loss_history = [] + val_loss_history = [] + lr_history = [] + val_epochs = [] + + # --- TRAINING LOOP --- + for epoch in range(start_epoch, hparams['nb_epochs']): + epoch_start_time = time.time() + model.train() + total_train_loss = 0.0 + + for batch_idx, batch in enumerate(train_loader): + coordinates = batch['coordinates'].to(device) + node_type = batch['node_type'].to(device) + current_velocities = batch['current_velocities'].to(device) + target_velocities = batch['target_velocities'].to(device) + + inlet_velocities = batch['meta_info']['velocity'] + if torch.is_tensor(inlet_velocities): + inlet_vel_tensor = inlet_velocities.clone().detach().float().to(device) + else: + inlet_vel_tensor = torch.tensor(inlet_velocities, dtype=torch.float32).to(device) + + # --- NORMALIZE FEATURES --- + norm_coords = (coordinates - coords_mean) / (coords_std + epsilon) + norm_current_vel = (current_velocities - vel_mean) / (vel_std + epsilon) + norm_target_vel = (target_velocities - vel_mean) / (vel_std + epsilon) + norm_inlet_vel = (inlet_vel_tensor - inlet_vel_mean) / (inlet_vel_std + epsilon) + + batch_size, num_nodes, _ = coordinates.shape + inlet_vel_feature = norm_inlet_vel.view(batch_size, 1, 1).expand(batch_size, num_nodes, 1) + + # Input: [normalized coordinates, node_type, normalized velocities, normalized inlet_velocity] + input_features = torch.cat([norm_coords, node_type, norm_current_vel, inlet_vel_feature], dim=-1) + + optimizer.zero_grad() + 
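+            # Forward pass: the model maps the 13 per-node input features to normalized
+            # next-step velocities; wall nodes are removed from the loss by the mask below.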
predicted_normalized_velocities = model(input_features) + + # Apply wall masking + mask = create_wall_mask(node_type) + loss = criterion(predicted_normalized_velocities * mask, norm_target_vel * mask) + loss.backward() + optimizer.step() + + total_train_loss += loss.item() + + # Progress update every 50 batches + if batch_idx % 50 == 0: + print(f"Epoch {epoch+1}/{hparams['nb_epochs']} | Batch {batch_idx+1}/{len(train_loader)} | Loss: {loss.item():.6f}") + + scheduler.step() + lr_history.append(optimizer.param_groups[0]['lr']) + epoch_time = time.time() - epoch_start_time + avg_train_loss = total_train_loss / len(train_loader) + train_loss_history.append(avg_train_loss) + + print(f"Epoch {epoch+1}/{hparams['nb_epochs']} completed | Avg Train Loss: {avg_train_loss:.6f} | Time: {epoch_time:.2f}s") + + # --- PERIODIC EVALUATION --- + if (epoch + 1) % hparams['eval_freq'] == 0 or epoch == hparams['nb_epochs'] - 1: + print(f"Epoch {epoch+1}/{hparams['nb_epochs']} | Train Loss: {avg_train_loss:.6f}") + + # Rollout evaluation + print(" Running rollout evaluation...") + rollout_results = evaluate_rollout(model, test_trajectory_loader, device, norm_stats) + + # Calculate loss from rollout + total_rollout_mse = 0.0 + for result in rollout_results: + # Move tensors to the correct device + pred_traj = result['predicted_trajectory'].to(device) + gt_traj = result['ground_truth_trajectory'].to(device) + node_type = result['node_type'].to(device) + + # Create a mask to ignore wall nodes + mask = create_wall_mask(node_type).unsqueeze(0) # Add time dim for broadcasting + + # Calculate masked MSE + loss = criterion(pred_traj * mask, gt_traj * mask) + total_rollout_mse += loss.item() + + avg_rollout_mse = total_rollout_mse / len(rollout_results) + + val_loss = avg_rollout_mse + val_loss_history.append(val_loss) + val_epochs.append(epoch + 1) + print(f" Validation Rollout MSE: {val_loss:.6f}") + + # Save rollout results + rollout_path = os.path.join(output_dir, f"rollout_results_epoch_{epoch+1}.npy") + np.save(rollout_path, rollout_results, allow_pickle=True) # allow_pickle is good practice for np.save with dicts + print(f" -> Saved rollout results to {rollout_path}") + + # Save best model based on rollout loss + if val_loss < best_val_loss: + best_val_loss = val_loss + best_model_path = os.path.join(output_dir, 'best_model.pth') + torch.save(model.state_dict(), best_model_path) + print(f" -> New best model saved with validation loss: {best_val_loss:.6f}") + + # --- PERIODIC CHECKPOINTING --- + if (epoch + 1) % hparams['checkpoint_freq'] == 0: + # Use validation loss if available, otherwise use training loss + checkpoint_loss = val_loss if 'val_loss' in locals() else avg_train_loss + checkpoint_path = os.path.join(output_dir, f'checkpoint_epoch_{epoch+1}.pth') + save_checkpoint(model, optimizer, scheduler, epoch + 1, checkpoint_loss, checkpoint_path, norm_stats) + + # --- SAVE LATEST CHECKPOINT --- + save_checkpoint(model, optimizer, scheduler, hparams['nb_epochs'], best_val_loss, + os.path.join(output_dir, 'checkpoint_latest.pth'), norm_stats) + + print("="*60) + print("Training complete.") + print(f"Best validation loss: {best_val_loss:.6f}") + + # --- PLOTTING AND SAVING LOSS HISTORY --- + plt.figure(figsize=(10, 5)) + plt.plot(train_loss_history, label='Training Loss') + plt.plot(val_epochs, val_loss_history, 'o-', label='Validation Loss') + plt.title('Training and Validation Loss Over Epochs') + plt.xlabel('Epoch') + plt.ylabel('Loss (MSE)') + plt.legend() + plt.grid(True) + 
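+    # Note: the training curve is plotted against its 0-based list index (relative to the resume
+    # point), while the validation markers use absolute 1-based epoch numbers from val_epochs,
+    # so the two series can be offset along the x-axis.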
plt.savefig(os.path.join(output_dir, 'training_loss_plot.png'))
+    print(f"Loss plot saved to {os.path.join(output_dir, 'training_loss_plot.png')}")
+
+    loss_data = {
+        'train_loss_history': train_loss_history,
+        'val_loss_history': val_loss_history,
+        'val_epochs': val_epochs,
+        'lr_history': lr_history,
+        'best_val_loss': best_val_loss
+    }
+    with open(os.path.join(output_dir, 'loss_history.json'), 'w') as f:
+        json.dump(loss_data, f, indent=4)
+    print(f"Loss history saved to {os.path.join(output_dir, 'loss_history.json')}")
+
+    script_end_time = time.time()
+    elapsed_time_seconds = script_end_time - script_start_time
+    print("="*60)
+    print(f"Total script execution time: {elapsed_time_seconds / 60:.2f} minutes ({elapsed_time_seconds:.2f} seconds)")
+if __name__ == '__main__':
+    main()
diff --git a/cleanroom/test_files/read_pt.py b/cleanroom/test_files/read_pt.py
new file mode 100644
index 0000000000000000000000000000000000000000..563de90e6bb5f02810cdd84a887f7794bf70e5ba
--- /dev/null
+++ b/cleanroom/test_files/read_pt.py
@@ -0,0 +1,23 @@
+import torch
+
+file_path = "/home/gd_user1/AnK/project_PINN/cleanroom/rawdataset/final_data_timestep_data_train.pt"
+
+
+print(f"Reading data from '{file_path}'...")
+
+try:
+    # Use torch.load() to read the .pt file.
+    # This function deserializes the pickled object and loads it into memory.
+    loaded_data = torch.load(file_path)
+
+    print("Successfully loaded the data. Here is the content:")
+    print(loaded_data)
+
+    # The training file holds a list of per-timestep dicts rather than a single tensor.
+    print("\nType of the loaded data:", type(loaded_data))
+    print("Number of loaded samples:", len(loaded_data))
+
+except FileNotFoundError:
+    print(f"Error: The file '{file_path}' was not found.")
+except Exception as e:
+    print(f"An error occurred while reading the file: {e}")
\ No newline at end of file