#data
dataset_name: deepjeb
test_name: "baseline"
gpu_id: 0
epochs: 300
data_dir: /raid/ansysai/udbhav/alpha_Xdata/xgm_data/data_prep_transformer/2_Structural_linear_static/deepjeb/1_VTK_surface/
json_file: ${data_dir}/../full_transform_params.json
# NOTE(review): this value ends at a directory ("/../"), not a file — looks
# truncated; confirm the intended splits file name against the data loader.
splits_file: ${data_dir}/../
data_folder: ${dataset_name}
normalization: "std_norm"
norm_vars: "von_mises_stress"
physical_scale_for_test: true  # canonical lowercase boolean (was "True")
# num_points: 40000
num_points: 15000  # points sampled per data sample
num_workers: 1     # dataloader worker processes
# --- model ---
indim: 3    # input feature dimension (presumably xyz surface coordinates — confirm against dataset)
outdim: 4   # output feature dimension
model: ansysLPFMs
hidden_dim: 256  # transformer embedding width
n_heads: 8       # attention heads
n_decoder: 8     # number of decoder layers
mlp_ratio: 2     # presumably MLP hidden size = mlp_ratio * hidden_dim — confirm in model code
# --- training ---
val_iter: 1   # run validation every N iterations/epochs (unit defined by trainer)
lr: 0.001
batch_size: 1
# NOTE(review): "optimizer:" carries no value (parses as null) and "type" is a
# separate top-level key — this looks like lost indentation; confirm whether
# "type" should be nested under "optimizer" in the consuming code.
optimizer:
type: AdamW
scheduler: OneCycleLR #OneCycleLR
loss_type: huber # options: mse, mae, huber
# scheduler: LinearWarmupCosineAnnealingLR
num_processes: 1
max_grad_norm: 1.0  # gradient clipping threshold
mixed_precision: true #currently default fp16 is selected by torch.autocast(). Fp16 gave the best results for Transformer based models.
eval: false
chunked_eval: true # Default with True is evaluation of max chunks of size num_points that can fit in a data sample, to avoid small last chunks
train_ckpt_load: false ## Will load best model if ckpt_load is false
# --- logging ---
# test_name: "Final_surface_only_OCLR_3p9M_float32_A100"
pos_embed_sincos: true  # canonical lowercase boolean (was "True")
project_name: ${dataset_name}