# Source: genai_test/train_utils/preprocess_s1.py
# (Hugging Face upload by Kyo-Kai, "Upload 12 files", commit d824e43 verified)
# preprocess_s1.py
import numpy as np
import os
from collections import defaultdict
from scipy.stats import zscore
from typing import List, Dict, Any, Tuple
def compute_sequence_lengths(dataset_folder="dataset", show_details=False):
    """
    Compute and print min/max sequence lengths (nodes, elements) across all
    trusses in the dataset.

    Only one file per (mode, min n_div) and one per (mode, max n_div) is
    loaded, on the assumption that sequence length grows with n_div.
    Filenames are expected to look like 'truss_<mode>_<n_div>_<idx>.npz';
    files that do not match are skipped.

    Args:
        dataset_folder: Directory containing the .npz truss files.
        show_details: If True, also print keys/shapes of one example file.

    Returns:
        Dict with keys 'min_nodes', 'max_nodes', 'min_elements', 'max_elements'.

    Raises:
        ValueError: If the folder contains no .npz files, or none whose
            names match the expected 'truss_<mode>_<n_div>_<idx>' pattern.
    """
    npz_files = [f for f in os.listdir(dataset_folder) if f.endswith('.npz')]
    if not npz_files:
        raise ValueError(f"No .npz files found in '{dataset_folder}'.")

    def _parse(fname):
        # 'truss_pratt_15_39.npz' -> ('pratt', 15); None if the name doesn't fit.
        parts = fname[:-4].rsplit('_', 2)
        if len(parts) == 3 and parts[0].startswith('truss_'):
            try:
                return parts[0][6:], int(parts[1])
            except ValueError:
                # Middle segment was not an integer n_div; ignore this file.
                return None
        return None

    # First pass: compute min/max n_div per mode.
    min_max_div = defaultdict(lambda: (float('inf'), float('-inf')))
    for f in npz_files:
        parsed = _parse(f)
        if parsed is None:
            continue
        mode, n_div = parsed
        min_d, max_d = min_max_div[mode]
        min_max_div[mode] = (min(min_d, n_div), max(max_d, n_div))
    # Without this guard, int(float('inf')) below would raise OverflowError.
    if not min_max_div:
        raise ValueError(
            f"No files matching 'truss_<mode>_<n_div>_<idx>.npz' in '{dataset_folder}'."
        )
    # Second pass: collect the files sitting at the min/max n_div per mode.
    min_div_files = defaultdict(list)
    max_div_files = defaultdict(list)
    for f in npz_files:
        parsed = _parse(f)
        if parsed is None:
            continue
        mode, n_div = parsed
        if n_div == min_max_div[mode][0]:
            min_div_files[mode].append(f)
        if n_div == min_max_div[mode][1]:
            max_div_files[mode].append(f)
    # Load one representative file per extreme per mode.
    min_n_nod = float('inf')
    max_n_nod = 0
    min_n_ele = float('inf')
    max_n_ele = 0
    for mode in sorted(min_max_div):
        if min_div_files[mode]:
            min_file = min_div_files[mode][0]
            with np.load(os.path.join(dataset_folder, min_file)) as data_min:
                min_n_nod = min(min_n_nod, int(data_min['n_nod_tot']))
                min_n_ele = min(min_n_ele, int(data_min['n_ele_tot']))
        if max_div_files[mode]:
            max_file = max_div_files[mode][0]
            with np.load(os.path.join(dataset_folder, max_file)) as data_max:
                max_n_nod = max(max_n_nod, int(data_max['n_nod_tot']))
                max_n_ele = max(max_n_ele, int(data_max['n_ele_tot']))
    print(f"Overall min sequence lengths: nodes={min_n_nod}, elements={min_n_ele}")
    print(f"Overall max sequence lengths: nodes={max_n_nod}, elements={max_n_ele}")
    if show_details:
        example_mode = next(iter(max_div_files))
        example_file = max_div_files[example_mode][0]
        with np.load(os.path.join(dataset_folder, example_file)) as example_data:
            print(f"\nExample data keys from '{example_file}': {example_data.files}")
            for key in example_data.files:
                print(f"  - {key}: shape {example_data[key].shape}, dtype {example_data[key].dtype}")
    return {
        'min_nodes': int(min_n_nod),
        'max_nodes': int(max_n_nod),
        'min_elements': int(min_n_ele),
        'max_elements': int(max_n_ele),
    }
def _pad_1d(arr: np.ndarray, max_len: int, pad_val=0):
"""Pad 1D array to max_len with pad_val."""
out = np.full((max_len,), pad_val, dtype=arr.dtype)
n = min(len(arr), max_len)
out[:n] = arr[:n]
return out
def _pack_sample(data: np.lib.npyio.NpzFile) -> Dict[str, Any]:
"""Extract a single sample (no padding) with lengths."""
nodal = data['nodal_coord'] # (n_nodes, 2)
ele_nod = data['ele_nod'] # (n_elems, 2) int
pel = data['pel'] # (n_elems, 4) int (may be redundant for topology)
sample = {
'nodal': nodal.astype(np.float32),
'ele_nod': ele_nod.astype(np.int64),
'pel': pel.astype(np.int64),
'n_nodes': int(nodal.shape[0]),
'n_elems': int(ele_nod.shape[0]),
# Optional globals (kept here for conditioning/analysis)
'height': float(np.array(data['height']).item()) if 'height' in data else None,
'spacing': float(np.array(data['spacing']).item()) if 'spacing' in data else None,
'meta': {
'n_rods': int(np.array(data['n_rods']).item()) if 'n_rods' in data else None,
'n_beams': int(np.array(data['n_beams']).item()) if 'n_beams' in data else None,
'mode': str(np.array(data['truss_mode']).item()) if 'truss_mode' in data else None,
}
}
return sample
def load_truss_samples(dataset_folder="dataset") -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
    """
    Load every sample in dataset_folder WITHOUT padding.

    Returns:
        samples: list of per-truss dicts with variable-length arrays and lengths.
        metadata: dict with global max lengths and the sample count.
    """
    lengths = compute_sequence_lengths(dataset_folder)
    samples: List[Dict[str, Any]] = []
    for fname in os.listdir(dataset_folder):
        if not fname.endswith('.npz'):
            continue
        archive = np.load(os.path.join(dataset_folder, fname))
        samples.append(_pack_sample(archive))
        archive.close()
    metadata = {
        'max_nodes': lengths['max_nodes'],
        'max_elements': lengths['max_elements'],
        'n_samples': len(samples),
    }
    return samples, metadata
def preprocess_s1(dataset_folder="dataset", normalize_type=None):
    """
    Backwards-compatible preprocessor: still returns one big padded matrix
    for legacy code, but now:
      - integer index arrays are padded with -1 (a safe sentinel),
      - per-sample (n_nodes, n_elems) counts are also returned for bucketing.

    Args:
        dataset_folder: Directory of .npz truss files.
        normalize_type: None, 'min_max', or 'z_score'.

    Returns:
        data_array: (n_samples, total_dim) float32 matrix of padded samples.
        metadata: dict with padding dims, sample count, normalization used,
            and 'size_info' — an (n_samples, 2) int32 array of
            [n_nodes, n_elems] per sample.

    Raises:
        ValueError: For an unrecognized normalize_type.
    """
    lengths = compute_sequence_lengths(dataset_folder)
    max_nodes = lengths['max_nodes']
    max_elements = lengths['max_elements']
    total_dim = max_nodes * 2 + max_elements * 2 + max_elements * 4

    rows = []
    size_info = []
    for fname in os.listdir(dataset_folder):
        if not fname.endswith('.npz'):
            continue
        data = np.load(os.path.join(dataset_folder, fname))
        coords = data['nodal_coord']
        connectivity = data['ele_nod']
        pel = data['pel']
        size_info.append([int(coords.shape[0]), int(connectivity.shape[0])])
        # Pad with safe values: floats→0.0, ints→-1
        pieces = [
            _pad_1d(coords.flatten().astype(np.float32), max_nodes * 2, pad_val=0.0),
            _pad_1d(connectivity.flatten().astype(np.int64), max_elements * 2, pad_val=-1),
            _pad_1d(pel.flatten().astype(np.int64), max_elements * 4, pad_val=-1),
        ]
        rows.append(np.concatenate(pieces).astype(np.float32))
        data.close()

    data_array = np.stack(rows, axis=0)  # (n_samples, total_dim)
    if normalize_type == 'min_max':
        lo = data_array.min()
        hi = data_array.max()
        data_array = (data_array - lo) / (hi - lo + 1e-12)
    elif normalize_type == 'z_score':
        data_array = zscore(data_array, axis=0)
    elif normalize_type is not None:
        raise ValueError(f"Unknown normalize_type: {normalize_type}")

    metadata = {
        'max_nodes': max_nodes,
        'max_elements': max_elements,
        'total_dim': total_dim,
        'n_samples': len(rows),
        'normalize_type': normalize_type,
        'size_info': np.array(size_info, dtype=np.int32)  # (n_samples, 2)
    }
    return data_array, metadata