# udbhav9628's picture
# Upload folder using huggingface_hub
# 096ade4 verified
# data
dataset_name: deepjeb
test_name: "baseline"
gpu_id: 0
epochs: 300
data_dir: /raid/ansysai/udbhav/alpha_Xdata/xgm_data/data_prep_transformer/2_Structural_linear_static/deepjeb/1_VTK_surface/
json_file: ${data_dir}/../full_transform_params.json
splits_file: ${data_dir}/../
data_folder: ${dataset_name}
normalization: "std_norm"
norm_vars: "von_mises_stress"
physical_scale_for_test: true
# num_points: 40000
num_points: 15000
num_workers: 1
# model
indim: 3
outdim: 4
model: ansysLPFMs
hidden_dim: 256
n_heads: 8
n_decoder: 8
mlp_ratio: 2
# training
val_iter: 1
lr: 0.001
batch_size: 1
optimizer:  # NOTE(review): empty value — 'type' below is a top-level key, not nested under 'optimizer'; confirm this is the structure the loader expects
type: AdamW
scheduler: OneCycleLR #OneCycleLR
loss_type: huber # options: mse, mae, huber
# scheduler: LinearWarmupCosineAnnealingLR
num_processes: 1
max_grad_norm: 1.0
mixed_precision: true  # currently default fp16 is selected by torch.autocast(); fp16 gave the best results for Transformer-based models
eval: false
chunked_eval: true  # default (true): evaluate in the largest chunks of size num_points that fit in a data sample, to avoid small last chunks
train_ckpt_load: false  # loads the best model when train_ckpt_load is false
# logging
# test_name: "Final_surface_only_OCLR_3p9M_float32_A100"
pos_embed_sincos: true
project_name: ${dataset_name}