Add files using upload-large-folder tool
- configs/combustion/cno.yaml +50 -0
- configs/combustion/deeponet.yaml +34 -0
- configs/combustion/dmd.yaml +25 -0
- configs/combustion/dpot_l.yaml +54 -0
- configs/combustion/dpot_s.yaml +54 -0
- configs/combustion/fno.yaml +37 -0
- configs/combustion/galerkin_transformer.yaml +72 -0
- configs/combustion/mwt.yaml +43 -0
- configs/combustion/surrogate_model/fno.yaml +28 -0
- configs/combustion/surrogate_model/unet.yaml +25 -0
- configs/combustion/trainsolver.yaml +46 -0
- configs/combustion/unet.yaml +34 -0
- configs/combustion/wdno.yaml +41 -0
- configs/controlled_cylinder/cno.yaml +54 -0
- configs/controlled_cylinder/deeponet.yaml +34 -0
- configs/controlled_cylinder/dmd.yaml +26 -0
- configs/controlled_cylinder/dpot_l.yaml +54 -0
- configs/controlled_cylinder/dpot_s.yaml +54 -0
- configs/controlled_cylinder/fno.yaml +37 -0
- configs/controlled_cylinder/galerkin_transformer.yaml +78 -0
- configs/controlled_cylinder/mwt.yaml +44 -0
- configs/controlled_cylinder/trainsolver.yaml +47 -0
- configs/controlled_cylinder/unet.yaml +34 -0
- configs/controlled_cylinder/wdno.yaml +41 -0
- configs/cylinder/cno.yaml +54 -0
- configs/cylinder/deeponet.yaml +38 -0
- configs/cylinder/dmd.yaml +29 -0
- configs/cylinder/dpot_l.yaml +54 -0
- configs/cylinder/dpot_s.yaml +54 -0
- configs/cylinder/fno.yaml +39 -0
- configs/cylinder/galerkin_transformer.yaml +72 -0
- configs/cylinder/mwt.yaml +44 -0
- configs/cylinder/trainsolver.yaml +47 -0
- configs/cylinder/unet.yaml +36 -0
- configs/cylinder/wdno.yaml +43 -0
- configs/foil/cno.yaml +54 -0
- configs/foil/deeponet.yaml +35 -0
- configs/foil/dmd.yaml +26 -0
- configs/foil/dpot_l.yaml +54 -0
- configs/foil/dpot_s.yaml +54 -0
- configs/foil/fno.yaml +37 -0
- configs/foil/galerkin_transformer.yaml +78 -0
- configs/foil/mwt.yaml +44 -0
- configs/foil/trainsolver.yaml +47 -0
- configs/foil/unet.yaml +34 -0
- configs/foil/wdno.yaml +41 -0
- configs/fsi/cno.yaml +53 -0
- configs/fsi/deeponet.yaml +37 -0
- configs/fsi/dmd.yaml +25 -0
- configs/fsi/dpot_l.yaml +54 -0
# configs/combustion/cno.yaml
exp_name: "cno_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # gaussian, range, none

# data parameters for training
mask_prob: 0.1
noise_scale: 0.1  # only applicable for numerical data

# model
model_name: "cno"
checkpoint_path: ./results/cno/cno_combustion_numerical_False/2025-09-16_05-34-53/model_3000.pth  # for resume training
# NOTE(review): trailing commas removed below — "N_res: 1," parses as the
# string "1,", not the int 1, so the consumer would have received strings.
N_layers: 3  # Number of (D) or (U) blocks in the network
N_res: 1  # Number of (R) blocks per level (except the neck)
N_res_neck: 8  # Number of (R) blocks in the neck
channel_multiplier: 32  # How the number of channels evolve?
conv_kernel: 3  # Size of all the kernels
cutoff_den: 2.0001  # Filter property 1.
filter_size: 6  # Filter property 2.
lrelu_upsampling: 2  # Filter property 3.
half_width_mult: 0.8  # Filter property 4.
radial: false  # Filter property 5. Is filter radial?
batch_norm: true  # Add BN? We do not add BN in lifting/projection layer
out_dim: 1  # Target dimension
out_size: 1  # If out_size is 1, Then out_size = in_size. Else must be int
expand_input: false  # Start with original in_size, or expand it (pad zeros in the spectrum)
latent_lift_proj_dim: 64  # Intermediate latent dimension in the lifting/projection layer
add_inv: true  # Add invariant block (I) after the intermediate connections?
activation: 'LeakyReLU'  # Activation function can be 'LeakyReLU' or 'lrelu'

# training
is_use_tb: true
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 5000
train_batch_size: 16
test_batch_size: 64
lr: 0.0003
clip_grad_norm: 0.

# evaluation
N_autoregressive: 3
N_plot: 0
# configs/combustion/deeponet.yaml
exp_name: "deeponet_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # none, gaussian, range

# data parameters for training
mask_prob: 0.5
noise_scale: 0.0  # only applicable for numerical data

# model
model_name: "deeponet"
checkpoint_path: ./results/deeponet/deeponet_combustion_numerical_False/2025-09-16_22-50-38/model_2220.pth  # for resume training and evaluation
p: 128
dropout_rate: 0.1

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 3000
train_batch_size: 64
test_batch_size: 64
lr: 0.0005
clip_grad_norm: 0.

# evaluation
N_autoregressive: 1
N_plot: 1
# configs/combustion/dmd.yaml
exp_name: "dmd_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"

# model
model_name: "dmd"
is_resume: false  # lowercase boolean (yamllint truthy); parses identically
n_modes: 10
n_predict: 20
mask_prob: 0.5
noise_scale: 0.1  # only applicable for numerical data
input_feature: 2
checkpoint_path: 'no model needed'  # DMD is fitted at eval time; no trained weights
test_batch_size: 12

# evaluation
N_plot: 1
N_autoregressive: 1
# configs/combustion/dpot_l.yaml
exp_name: "dpot_l_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 16
normalizer: "gaussian"  # gaussian, range, none

# data parameters for training
mask_prob: 0.1
noise_scale: 0.5  # only applicable for numerical data

# model
model_name: "dpot"
checkpoint_path: "./dpot_ckpts/model_L.pth"  # Large DPOT model
model_type: "dpot"  # "dpot" or "dpot3d"

# DPOT specific parameters (Large model configuration - from checkpoint)
img_size: 128  # Pre-trained model resolution (with auto-resize for combustion)
patch_size: 8
in_channels: 16
out_channels: 16
embed_dim: 1536
depth: 24
n_blocks: 16
modes: 32  # Keep default modes for AFNO
mlp_ratio: 4
out_layer_dim: 1536  # Match checkpoint out layer dim
normalize: false  # lowercase boolean (yamllint truthy); parses identically
act: "gelu"
time_agg: "exp_mlp"
n_cls: 12  # Match checkpoint pre-training (12 datasets)

# dpot multi-step specific parameters (from checkpoint)
in_timesteps: 20
out_timesteps: 20

# training
is_use_tb: true
scheduler: "cosine"  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 10000
train_batch_size: 16
test_batch_size: 64
lr: 0.0001
clip_grad_norm: 1

# evaluation
N_autoregressive: 1
N_plot: 5
# configs/combustion/dpot_s.yaml
exp_name: "dpot_s_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 16
normalizer: "gaussian"  # gaussian, range, none

# data parameters for training
mask_prob: 0.1
noise_scale: 0.5  # only applicable for numerical data

# model
model_name: "dpot"
checkpoint_path: "./dpot_ckpts/model_S.pth"  # Small DPOT model
model_type: "dpot"  # "dpot" or "dpot3d"

# DPOT specific parameters (Small model configuration - from checkpoint)
img_size: 128  # Pre-trained model resolution (with auto-resize for combustion)
patch_size: 8
in_channels: 16
out_channels: 16
embed_dim: 1024
depth: 6
n_blocks: 8
modes: 32  # Keep default modes for AFNO
mlp_ratio: 1
out_layer_dim: 32  # Keep default out layer dim
normalize: false  # lowercase boolean (yamllint truthy); parses identically
act: "gelu"
time_agg: "exp_mlp"
n_cls: 12  # Match checkpoint pre-training (12 datasets)

# dpot multi-step specific parameters
in_timesteps: 20
out_timesteps: 20

# training
is_use_tb: true
scheduler: "cosine"  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 10000
train_batch_size: 16
test_batch_size: 64
lr: 0.001
clip_grad_norm: 1

# evaluation
N_autoregressive: 1
N_plot: 5
# configs/combustion/fno.yaml
exp_name: "fno_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # none, gaussian, range

# data parameters for training
mask_prob: 0.5
noise_scale: 0.0  # only applicable for numerical data

# model
model_name: "fno"
checkpoint_path: ./results/fno/fno_combustion_numerical_False/2025-09-14_08-01-31/model_0720.pth
modes1: 4
modes2: 16
modes3: 16
n_layers: 4
width: 64

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 2000
train_batch_size: 64
test_batch_size: 64
lr: 0.01
clip_grad_norm: 0.

# evaluation
N_autoregressive: 1
N_plot: 0
# configs/combustion/galerkin_transformer.yaml
exp_name: "galerkin_transformer_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"

# data parameters for training
mask_prob: 0.5
noise_scale: 0.1  # only applicable for numerical data

# model
model_name: "galerkin_transformer"
checkpoint_path: ./results/galerkin_transformer/galerkin_transformer_combustion_real_True/2025-09-16_23-02-38/model_4900.pth

pos_dim: 1  # pos dim
n_hidden: 256
num_feat_layers: 0
num_encoder_layers: 1
n_head: 4
dim_feedforward: 256
feat_extract_type: null
attention_type: galerkin
xavier_init: 0.01
diagonal_weight: 0.01
symmetric_init: false  # booleans lowercased (yamllint truthy); parse identically
layer_norm: false
attn_norm: true
norm_eps: 0.0000001
batch_norm: false
return_attn_weight: false
return_latent: false
decoder_type: ifft2
spacial_dim: 3
spacial_fc: true
upsample_mode: interp
downsample_mode: interp
freq_dim: 128
# NOTE(review): plain `None` parses as the STRING "None", not YAML null
# (unlike feat_extract_type above, which uses `null`). Quoted to make the
# string typing explicit — confirm the consumer expects the string "None".
boundary_condition: "None"
num_regressor_layers: 1
fourier_modes_x: 16
fourier_modes_y: 16
fourier_modes_t: 4
regressor_activation: silu
downscaler_activation: relu
upscaler_activation: silu
last_activation: true
dropout: 0.0
downscaler_dropout: 0.05
upscaler_dropout: 0.0
ffn_dropout: 0.05
encoder_dropout: 0.05
decoder_dropout: 0.0  # was bare `0` (int); float form matches sibling dropout keys

# training
is_use_tb: true
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 5000
train_batch_size: 16
test_batch_size: 16
lr: 0.01
clip_grad_norm: 0.

# evaluation
N_autoregressive: 1
N_plot: 1
# configs/combustion/mwt.yaml
exp_name: "mwt_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # gaussian, range, none

# data parameters for training
mask_prob: 0.5
noise_scale: 0.0  # only applicable for numerical data

# model
model_name: "mwt"
checkpoint_path: "./results/mwt/mwt_combustion_numerical_False/2025-09-18_14-45-34/model_3800.pth"

k: 3  # wavelet filter size
alpha: 5  # fourier filter parameter, should be less or equal to T/2
c: 4  # increase factor
nCZ: 4  # number of CZ blocks
L: 0
base: "legendre"  # chebyshev

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 5000
test_interval: 200
train_batch_size: 32
test_batch_size: 64
lr: 0.001
clip_grad_norm: 0.

# evaluation
N_autoregressive: 3
N_plot: 5
# configs/combustion/surrogate_model/fno.yaml
exp_name: "surrogate_fno_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 10
normalizer: "gaussian"

# model
model_name: "fno"
modes1: 4
modes2: 16
modes3: 16
n_layers: 4
width: 64

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 1000
train_batch_size: 16
test_batch_size: 16
lr: 0.0001
clip_grad_norm: 0.
# configs/combustion/surrogate_model/unet.yaml
exp_name: "surrogate_unet_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 10
normalizer: "gaussian"

# model
model_name: "unet"
dim_mults: [1, 2]  # channel multipliers per UNet stage

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 1000
train_batch_size: 2
test_batch_size: 2
lr: 0.0001
clip_grad_norm: 0.
# configs/combustion/trainsolver.yaml
# NOTE(review): the filename says "trainsolver" but exp_name/model_name say
# "transolver" — the filename looks like a typo; confirm which is intended.
exp_name: "transolver_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # none, gaussian, range

# data parameters for training
mask_prob: 0.1
noise_scale: 0.1  # only applicable for numerical data

# model
model_name: "transolver"
space_dim: 16
n_layers: 1
n_hidden: 256
n_head: 8
H: 64
W: 64
D: 20
fun_dim: 0  # 1
out_dim: 16
ref: 4
dropout: 0.1
act: "gelu"
mlp_ratio: 4
slice_num: 16

checkpoint_path: "./results/transolver/transolver_combustion_numerical_False/2025-09-15_13-23-53/model_3900.pth"

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 5000
test_interval: 200
train_batch_size: 16
test_batch_size: 16
lr: 0.0007
clip_grad_norm: 0.

# evaluation
N_autoregressive: 3
N_plot: 0
# configs/combustion/unet.yaml
exp_name: "unet_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"

# model
model_name: "unet"
is_resume: false  # booleans lowercased (yamllint truthy); parse identically
checkpoint_path: './results/unet/unet_combustion_numerical_False/2025-09-17_00-56-36/model_7600.pth'  # for resume training
dim_mults: [1, 2, 4]
mask_prob: 0.5
noise_scale: 0.1  # only applicable for numerical data

# training
# NOTE(review): both epoch-based (epochs/step_size/gamma) and update-based
# (scheduler/num_update) knobs are present — confirm which path the trainer uses.
is_use_tb: true
epochs: 200
step_size: 50
gamma: 0.5
train_batch_size: 12
test_batch_size: 12
lr: 0.0001
clip_grad_norm: 0.
scheduler: cosine  # step, cosine
num_update: 10000

# evaluation
N_plot: 1
N_autoregressive: 1
# configs/combustion/wdno.yaml
exp_name: "wdno_combustion"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "combustion"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "none"  # none, gaussian, range — quoted: plain `none` is already the string "none", made explicit to match sibling configs' quoting
# data parameters for training
mask_prob: 0.5
noise_scale: 0.0  # only applicable for numerical data

# model
model_name: "wdno"
checkpoint_path: ./results/wdno/wdno_combustion_numerical_False/2025-09-16_12-34-36/model_64000.pth
dim: 256
dim_mults: [1, 2]
wave_type: "bior1.3"
pad_mode: "zero"
beta_schedule: "sigmoid"

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 0  # only applicable for step scheduler
num_update: 100000
train_batch_size: 16
test_batch_size: 64
lr: 0.0001
clip_grad_norm: 1.0  # 0 for no clip

# sampling
sampling_timesteps: 15  # number of sampling timesteps (ddim for faster inference)
ddim_sampling_eta: 1.

# evaluation
N_autoregressive: 1
N_plot: 0
# configs/controlled_cylinder/cno.yaml
exp_name: "cno_control"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "controlled_cylinder"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # gaussian, range, none

# data parameters for training
mask_prob: 0.1
noise_scale: 0.1  # only applicable for numerical data

# model
model_name: "cno"
checkpoint_path: ./results/cno/cno_control_numerical_False/2025-09-07_00-40-24/model_4400.pth  # for resume training

# NOTE(review): trailing commas removed below — "N_res: 1," parses as the
# string "1,", not the int 1, so the consumer would have received strings.
N_layers: 3  # Number of (D) or (U) blocks in the network
N_res: 1  # Number of (R) blocks per level (except the neck)
N_res_neck: 8  # Number of (R) blocks in the neck
channel_multiplier: 32  # How the number of channels evolve?
conv_kernel: 3  # Size of all the kernels
cutoff_den: 2.0001  # Filter property 1.
filter_size: 6  # Filter property 2.
lrelu_upsampling: 2  # Filter property 3.
half_width_mult: 0.8  # Filter property 4.
radial: false  # Filter property 5. Is filter radial?
batch_norm: true  # Add BN? We do not add BN in lifting/projection layer
out_dim: 1  # Target dimension
out_size: 1  # If out_size is 1, Then out_size = in_size. Else must be int
expand_input: false  # Start with original in_size, or expand it (pad zeros in the spectrum)
latent_lift_proj_dim: 64  # Intermediate latent dimension in the lifting/projection layer
add_inv: true  # Add invariant block (I) after the intermediate connections?
activation: 'LeakyReLU'  # Activation function can be 'LeakyReLU' or 'lrelu'

# training
is_use_tb: true
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 5000
train_batch_size: 32
test_batch_size: 64
lr: 0.0003
clip_grad_norm: 0.

# evaluation
N_autoregressive: 3
N_plot: 0
# configs/controlled_cylinder/deeponet.yaml
# NOTE(review): this file was the only sibling using unquoted strings;
# quoting normalized to match the other configs (parsed values unchanged).
exp_name: "deeponet_control"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "controlled_cylinder"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # none, gaussian, range

# data parameters for training
mask_prob: 0.1
noise_scale: 0.1  # only applicable for numerical data

# model
model_name: "deeponet"
checkpoint_path: ./results/deeponet/deeponet_control_numerical_False/2025-09-08_14-27-56/model_0600.pth
p: 256
dropout_rate: 0.1

# training
is_use_tb: true
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 5000
train_batch_size: 64
test_batch_size: 64
lr: 0.00005
clip_grad_norm: 0.

# evaluation
N_autoregressive: 1
N_plot: 1
# configs/controlled_cylinder/dmd.yaml
exp_name: "dmd_control"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "controlled_cylinder"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"

# model
model_name: "dmd"
is_resume: false  # lowercase boolean (yamllint truthy); parses identically
n_modes: 10
n_predict: 10
mask_prob: 0.5
noise_scale: 0.1  # only applicable for numerical data
input_feature: 2
checkpoint_path: 'no model needed'  # DMD is fitted at eval time; no trained weights
test_batch_size: 12

# evaluation
N_plot: 1
N_autoregressive: 1
# configs/controlled_cylinder/dpot_l.yaml
exp_name: "dpot_l_control"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "controlled_cylinder"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 16
normalizer: "gaussian"  # gaussian, range, none

# data parameters for training
mask_prob: 0.1
noise_scale: 0.5  # only applicable for numerical data

# model
model_name: "dpot"
checkpoint_path: "./dpot_ckpts/model_L.pth"  # Large DPOT model
model_type: "dpot"  # "dpot" or "dpot3d"

# DPOT specific parameters (Large model configuration - from checkpoint)
img_size: 128  # Pre-trained model resolution (with auto-resize for combustion)
patch_size: 8
in_channels: 5
out_channels: 4  # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
embed_dim: 1536
depth: 24
n_blocks: 16
modes: 32  # Keep default modes for AFNO
mlp_ratio: 4
out_layer_dim: 1536  # Match checkpoint out layer dim
normalize: false  # lowercase boolean (yamllint truthy); parses identically
act: "gelu"
time_agg: "exp_mlp"
n_cls: 12  # Match checkpoint pre-training (12 datasets)

# dpot multi-step specific parameters (from checkpoint)
in_timesteps: 10
out_timesteps: 10

# training
is_use_tb: true
scheduler: "cosine"  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 50000
train_batch_size: 16
test_batch_size: 64
lr: 0.00005
clip_grad_norm: 1

# evaluation
N_autoregressive: 1
N_plot: 5
# configs/controlled_cylinder/dpot_s.yaml
exp_name: "dpot_s_control"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "controlled_cylinder"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 16
normalizer: "gaussian"  # gaussian, range, none

# data parameters for training
mask_prob: 0.1
noise_scale: 0.5  # only applicable for numerical data

# model
model_name: "dpot"
checkpoint_path: "./dpot_ckpts/model_S.pth"  # Small DPOT model
model_type: "dpot"  # "dpot" or "dpot3d"

# DPOT specific parameters (Small model configuration - from checkpoint)
img_size: 128  # Pre-trained model resolution (with auto-resize for combustion)
patch_size: 8
in_channels: 5
out_channels: 4  # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
embed_dim: 1024
depth: 6
n_blocks: 8
modes: 32  # Keep default modes for AFNO
mlp_ratio: 1
out_layer_dim: 32  # Keep default out layer dim
normalize: false  # lowercase boolean (yamllint truthy); parses identically
act: "gelu"
time_agg: "exp_mlp"
n_cls: 12  # Match checkpoint pre-training (12 datasets)

# dpot multi-step specific parameters
in_timesteps: 10
out_timesteps: 10

# training
is_use_tb: true
scheduler: "cosine"  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 50000
train_batch_size: 16
test_batch_size: 64
lr: 0.0005
clip_grad_norm: 1

# evaluation
N_autoregressive: 1
N_plot: 5
# configs/controlled_cylinder/fno.yaml
exp_name: "fno_control"
gpu: 0
seed: 0
results_path: "./results/"

# data
dataset_name: "controlled_cylinder"
dataset_root: "/wutailin/real_benchmark/"
num_workers: 12
normalizer: "gaussian"  # none, gaussian, range

# data parameters for training
mask_prob: 0.1
noise_scale: 0.1  # only applicable for numerical data

# model
model_name: "fno"
checkpoint_path: ./results/fno/fno_control_numerical_False/2025-09-02_18-38-24/model_3760.pth
modes1: 4
modes2: 12
modes3: 16
n_layers: 4
width: 64

# training
is_use_tb: true  # lowercase boolean (yamllint truthy); parses identically
scheduler: cosine  # step, cosine
step_size: 1000  # only applicable for step scheduler
num_update: 4000
train_batch_size: 32
test_batch_size: 64
lr: 0.0001
clip_grad_norm: 0.

# evaluation
N_autoregressive: 1
N_plot: 0
configs/controlled_cylinder/galerkin_transformer.yaml
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "galerkin_transformer_control"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "controlled_cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "galerkin_transformer"
|
| 18 |
+
checkpoint_path: ./results/galerkin_transformer/galerkin_transformer_control_numerical_False/2025-09-08_15-47-23/model_4500.pth # for resume training
|
| 19 |
+
|
| 20 |
+
# modes1: 4
|
| 21 |
+
# modes2: 12
|
| 22 |
+
# modes3: 16
|
| 23 |
+
# n_layers: 4
|
| 24 |
+
# width: 64
|
| 25 |
+
|
| 26 |
+
pos_dim: 1 # pos dim
|
| 27 |
+
n_hidden: 256
|
| 28 |
+
num_feat_layers: 0
|
| 29 |
+
num_encoder_layers: 1
|
| 30 |
+
n_head: 4
|
| 31 |
+
dim_feedforward: 256
|
| 32 |
+
feat_extract_type: null
|
| 33 |
+
attention_type: galerkin
|
| 34 |
+
xavier_init: 0.01
|
| 35 |
+
diagonal_weight: 0.01
|
| 36 |
+
symmetric_init: False
|
| 37 |
+
layer_norm: False
|
| 38 |
+
attn_norm: True
|
| 39 |
+
norm_eps: 0.0000001
|
| 40 |
+
batch_norm: False
|
| 41 |
+
return_attn_weight: False
|
| 42 |
+
return_latent: False
|
| 43 |
+
decoder_type: ifft2
|
| 44 |
+
spacial_dim: 3
|
| 45 |
+
spacial_fc: True
|
| 46 |
+
upsample_mode: interp
|
| 47 |
+
downsample_mode: interp
|
| 48 |
+
freq_dim: 128
|
| 49 |
+
boundary_condition: None
|
| 50 |
+
num_regressor_layers: 1
|
| 51 |
+
fourier_modes_x: 12
|
| 52 |
+
fourier_modes_y: 16
|
| 53 |
+
fourier_modes_t: 4
|
| 54 |
+
regressor_activation: silu
|
| 55 |
+
downscaler_activation: relu
|
| 56 |
+
upscaler_activation: silu
|
| 57 |
+
last_activation: True
|
| 58 |
+
dropout: 0.0
|
| 59 |
+
downscaler_dropout: 0.05
|
| 60 |
+
upscaler_dropout: 0.0
|
| 61 |
+
ffn_dropout: 0.05
|
| 62 |
+
encoder_dropout: 0.05
|
| 63 |
+
decoder_dropout: 0
|
| 64 |
+
debug: False
|
| 65 |
+
|
| 66 |
+
# training
|
| 67 |
+
is_use_tb: True
|
| 68 |
+
scheduler: cosine # step, cosine
|
| 69 |
+
step_size: 1000 # only applicable for step scheduler
|
| 70 |
+
num_update: 5000
|
| 71 |
+
train_batch_size: 32
|
| 72 |
+
test_batch_size: 32
|
| 73 |
+
lr: 0.0001
|
| 74 |
+
clip_grad_norm: 0.
|
| 75 |
+
|
| 76 |
+
# evaluation
|
| 77 |
+
N_autoregressive: 1
|
| 78 |
+
N_plot: 1
|
configs/controlled_cylinder/mwt.yaml
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
exp_name: "mwt_control"
|
| 4 |
+
gpu: 0
|
| 5 |
+
seed: 0
|
| 6 |
+
results_path: "./results/"
|
| 7 |
+
|
| 8 |
+
# data
|
| 9 |
+
dataset_name: "controlled_cylinder"
|
| 10 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 11 |
+
num_workers: 12
|
| 12 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 13 |
+
|
| 14 |
+
# data parameters for training
|
| 15 |
+
mask_prob: 0.1
|
| 16 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# model
|
| 20 |
+
model_name: "mwt"
|
| 21 |
+
checkpoint_path: "/hupeiyan/along/realpdebench/realpdebench/results/mwt/fno_control_numerical_False/2025-09-08_08-27-42/model_4900.pth"
|
| 22 |
+
|
| 23 |
+
k: 3 # wavelet filter size
|
| 24 |
+
alpha: 5 # fourier filter parameter, should be less or equal to T/2
|
| 25 |
+
c: 4 # increase factor
|
| 26 |
+
nCZ: 4 # number of CZ blocks
|
| 27 |
+
L: 0
|
| 28 |
+
base: "legendre" # chebyshev
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# training
|
| 32 |
+
is_use_tb: True
|
| 33 |
+
scheduler: cosine # step, cosine
|
| 34 |
+
step_size: 1000 # only applicable for step scheduler
|
| 35 |
+
num_update: 5000
|
| 36 |
+
test_interval: 200
|
| 37 |
+
train_batch_size: 32
|
| 38 |
+
test_batch_size: 64
|
| 39 |
+
lr: 0.0005
|
| 40 |
+
clip_grad_norm: 0.
|
| 41 |
+
|
| 42 |
+
# evaluation
|
| 43 |
+
N_autoregressive: 3
|
| 44 |
+
N_plot: 5
|
configs/controlled_cylinder/trainsolver.yaml
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "transolver_control"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "controlled_cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "transolver"
|
| 18 |
+
space_dim: 5
|
| 19 |
+
n_layers: 1
|
| 20 |
+
n_hidden: 256
|
| 21 |
+
n_head: 8
|
| 22 |
+
H: 64
|
| 23 |
+
W: 128
|
| 24 |
+
D: 10
|
| 25 |
+
fun_dim: 0 # 1
|
| 26 |
+
out_dim: 3
|
| 27 |
+
ref: 4
|
| 28 |
+
dropout: 0.1
|
| 29 |
+
act: "gelu"
|
| 30 |
+
mlp_ratio: 4
|
| 31 |
+
slice_num: 16
|
| 32 |
+
|
| 33 |
+
checkpoint_path: "./results/transolver/transolver_control_numerical_False/2025-09-09_12-46-08/model_2100.pth"
|
| 34 |
+
# training
|
| 35 |
+
is_use_tb: True
|
| 36 |
+
scheduler: cosine # step, cosine
|
| 37 |
+
step_size: 1000 # only applicable for step scheduler
|
| 38 |
+
num_update: 5000
|
| 39 |
+
test_interval: 200
|
| 40 |
+
train_batch_size: 32
|
| 41 |
+
test_batch_size: 32
|
| 42 |
+
lr: 0.001
|
| 43 |
+
clip_grad_norm: 0.
|
| 44 |
+
|
| 45 |
+
# evaluation
|
| 46 |
+
N_autoregressive: 2
|
| 47 |
+
N_plot: 0
|
configs/controlled_cylinder/unet.yaml
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "unet_control"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "controlled_cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian"
|
| 11 |
+
|
| 12 |
+
# model
|
| 13 |
+
model_name: "unet"
|
| 14 |
+
is_resume: False
|
| 15 |
+
checkpoint_path: './results/unet/unet_control_numerical_False/2025-09-04_14-39-03/model_4800.pth' # for resume training
|
| 16 |
+
dim_mults: [1, 2, 4]
|
| 17 |
+
mask_prob: 0.5
|
| 18 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 19 |
+
|
| 20 |
+
# training
|
| 21 |
+
is_use_tb: True
|
| 22 |
+
epochs: 200
|
| 23 |
+
step_size: 50
|
| 24 |
+
gamma: 0.5
|
| 25 |
+
train_batch_size: 12
|
| 26 |
+
test_batch_size: 12
|
| 27 |
+
lr: 0.0001
|
| 28 |
+
clip_grad_norm: 0.
|
| 29 |
+
scheduler: cosine # step, cosine
|
| 30 |
+
num_update: 10000
|
| 31 |
+
|
| 32 |
+
# evaluation
|
| 33 |
+
N_plot: 1
|
| 34 |
+
N_autoregressive: 1
|
configs/controlled_cylinder/wdno.yaml
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "wdno_control"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "controlled_cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "none" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "wdno"
|
| 18 |
+
checkpoint_path: ./results/wdno/wdno_control_numerical_False/2025-09-05_06-25-58/model_48000.pth
|
| 19 |
+
dim: 256
|
| 20 |
+
dim_mults: [1, 2, 4]
|
| 21 |
+
wave_type: bior1.1
|
| 22 |
+
pad_mode: "zero"
|
| 23 |
+
beta_schedule: "sigmoid"
|
| 24 |
+
|
| 25 |
+
# training
|
| 26 |
+
is_use_tb: True
|
| 27 |
+
scheduler: cosine # step, cosine
|
| 28 |
+
step_size: 0 # only applicable for step scheduler
|
| 29 |
+
num_update: 120000
|
| 30 |
+
train_batch_size: 16
|
| 31 |
+
test_batch_size: 64
|
| 32 |
+
lr: 0.00001
|
| 33 |
+
clip_grad_norm: 1.0
|
| 34 |
+
|
| 35 |
+
# sampling
|
| 36 |
+
sampling_timesteps: 25 # number of sampling timesteps (ddim for faster inference)
|
| 37 |
+
ddim_sampling_eta: 1.
|
| 38 |
+
|
| 39 |
+
# evaluation
|
| 40 |
+
N_autoregressive: 1
|
| 41 |
+
N_plot: 1
|
configs/cylinder/cno.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
exp_name: "cno_cylinder"
|
| 4 |
+
gpu: 0
|
| 5 |
+
seed: 0
|
| 6 |
+
results_path: "./results/"
|
| 7 |
+
|
| 8 |
+
# data
|
| 9 |
+
dataset_name: "cylinder"
|
| 10 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 11 |
+
num_workers: 12
|
| 12 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 13 |
+
|
| 14 |
+
# data parameters for training
|
| 15 |
+
mask_prob: 0.1
|
| 16 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 17 |
+
|
| 18 |
+
# model
|
| 19 |
+
model_name: "cno"
|
| 20 |
+
checkpoint_path: ./results/cno/cno_cylinder_numerical_False/2025-09-16_14-34-26/model_4100.pth # for resume training
|
| 21 |
+
|
| 22 |
+
N_layers: 3 # Number of (D) or (U) blocks in the network
|
| 23 |
+
N_res: 1 # Number of (R) blocks per level (except the neck)
|
| 24 |
+
N_res_neck: 8 # Number of (R) blocks in the neck
|
| 25 |
+
channel_multiplier: 32 # How the number of channels evolve?
|
| 26 |
+
conv_kernel: 3 # Size of all the kernels
|
| 27 |
+
cutoff_den: 2.0001 # Filter property 1.
|
| 28 |
+
filter_size: 6 # Filter property 2.
|
| 29 |
+
|
| 30 |
+
lrelu_upsampling: 2 # Filter property 3.
|
| 31 |
+
half_width_mult: 0.8 # Filter property 4.
|
| 32 |
+
radial: False # Filter property 5. Is filter radial?
|
| 33 |
+
batch_norm: True # Add BN? We do not add BN in lifting/projection layer
|
| 34 |
+
out_dim: 1 # Target dimension
|
| 35 |
+
out_size: 1 # If out_size is 1, Then out_size = in_size. Else must be int
|
| 36 |
+
expand_input: False # Start with original in_size, or expand it (pad zeros in the spectrum)
|
| 37 |
+
latent_lift_proj_dim: 64 # Intermediate latent dimension in the lifting/projection layer
|
| 38 |
+
add_inv: True # Add invariant block (I) after the intermediate connections?
|
| 39 |
+
activation: 'LeakyReLU' # Activation function can be 'LeakyReLU' or 'lrelu'
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# training
|
| 43 |
+
is_use_tb: True
|
| 44 |
+
scheduler: cosine # step, cosine
|
| 45 |
+
step_size: 1000 # only applicable for step scheduler
|
| 46 |
+
num_update: 5000
|
| 47 |
+
train_batch_size: 16
|
| 48 |
+
test_batch_size: 64
|
| 49 |
+
lr: 0.0003
|
| 50 |
+
clip_grad_norm: 0.
|
| 51 |
+
|
| 52 |
+
# evaluation
|
| 53 |
+
N_autoregressive: 3
|
| 54 |
+
N_plot: 0
|
configs/cylinder/deeponet.yaml
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: deeponet_cylinder
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: ./results/
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: cylinder
|
| 8 |
+
dataset_root: /wutailin/real_benchmark/
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: gaussian # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: deeponet
|
| 18 |
+
checkpoint_path: ./results/deeponet/deeponet_cylinder_numerical_False/2025-09-14_19-05-53/model_0200.pth
|
| 19 |
+
|
| 20 |
+
p: 128
|
| 21 |
+
dropout_rate: 0.1
|
| 22 |
+
|
| 23 |
+
# training
|
| 24 |
+
is_use_tb: true
|
| 25 |
+
scheduler: cosine # step, cosine
|
| 26 |
+
step_size: 1000 # only applicable for step scheduler
|
| 27 |
+
num_update: 5000
|
| 28 |
+
train_batch_size: 32
|
| 29 |
+
test_batch_size: 64
|
| 30 |
+
lr: 0.0001
|
| 31 |
+
clip_grad_norm: 0.
|
| 32 |
+
|
| 33 |
+
# evaluation
|
| 34 |
+
N_autoregressive: 10
|
| 35 |
+
N_plot: 1
|
| 36 |
+
probe_diagnostic: True
|
| 37 |
+
N_plot_probe: 12
|
| 38 |
+
|
configs/cylinder/dmd.yaml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dmd_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian"
|
| 11 |
+
|
| 12 |
+
# model
|
| 13 |
+
model_name: "dmd"
|
| 14 |
+
is_resume: False
|
| 15 |
+
n_modes: 10
|
| 16 |
+
n_predict: 20
|
| 17 |
+
mask_prob: 0.5
|
| 18 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 19 |
+
input_feature: 2
|
| 20 |
+
checkpoint_path: 'no model needed'
|
| 21 |
+
|
| 22 |
+
test_batch_size: 12
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# evaluation
|
| 26 |
+
N_plot: 1
|
| 27 |
+
N_autoregressive: 1
|
| 28 |
+
probe_diagnostic: True
|
| 29 |
+
N_plot_probe: 12
|
configs/cylinder/dpot_l.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dpot_l_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 16
|
| 10 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.5 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "dpot"
|
| 18 |
+
checkpoint_path: "./dpot_ckpts/model_L.pth" # Large DPOT model
|
| 19 |
+
model_type: "dpot" # "dpot" or "dpot3d"
|
| 20 |
+
|
| 21 |
+
# DPOT specific parameters (Large model configuration - from checkpoint)
|
| 22 |
+
img_size: 128 # Pre-trained model resolution (with auto-resize for combustion)
|
| 23 |
+
patch_size: 8
|
| 24 |
+
in_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 25 |
+
out_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 26 |
+
embed_dim: 1536
|
| 27 |
+
depth: 24
|
| 28 |
+
n_blocks: 16
|
| 29 |
+
modes: 32 # Keep default modes for AFNO
|
| 30 |
+
mlp_ratio: 4
|
| 31 |
+
out_layer_dim: 1536 # Match checkpoint out layer dim
|
| 32 |
+
normalize: False
|
| 33 |
+
act: "gelu"
|
| 34 |
+
time_agg: "exp_mlp"
|
| 35 |
+
n_cls: 12 # Match checkpoint pre-training (12 datasets)
|
| 36 |
+
|
| 37 |
+
# dpot multi-step specific parameters (from checkpoint)
|
| 38 |
+
in_timesteps: 20
|
| 39 |
+
out_timesteps: 20
|
| 40 |
+
|
| 41 |
+
# training
|
| 42 |
+
is_use_tb: True
|
| 43 |
+
scheduler: "cosine" # step, cosine
|
| 44 |
+
step_size: 1000 # only applicable for step scheduler
|
| 45 |
+
num_update: 2000
|
| 46 |
+
train_batch_size: 16
|
| 47 |
+
test_batch_size: 64
|
| 48 |
+
lr: 0.0001
|
| 49 |
+
clip_grad_norm: 1
|
| 50 |
+
|
| 51 |
+
# evaluation
|
| 52 |
+
N_autoregressive: 1
|
| 53 |
+
N_plot: 5
|
| 54 |
+
|
configs/cylinder/dpot_s.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dpot_s_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 16
|
| 10 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.5 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "dpot"
|
| 18 |
+
checkpoint_path: "./dpot_ckpts/model_S.pth" # Small DPOT model
|
| 19 |
+
model_type: "dpot" # "dpot" or "dpot3d"
|
| 20 |
+
|
| 21 |
+
# DPOT specific parameters (Small model configuration - from checkpoint)
|
| 22 |
+
img_size: 128 # Pre-trained model resolution (with auto-resize for combustion)
|
| 23 |
+
patch_size: 8
|
| 24 |
+
in_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 25 |
+
out_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 26 |
+
embed_dim: 1024
|
| 27 |
+
depth: 6
|
| 28 |
+
n_blocks: 8
|
| 29 |
+
modes: 32 # Keep default modes for AFNO
|
| 30 |
+
mlp_ratio: 1
|
| 31 |
+
out_layer_dim: 32 # Keep default out layer dim
|
| 32 |
+
normalize: False
|
| 33 |
+
act: "gelu"
|
| 34 |
+
time_agg: "exp_mlp"
|
| 35 |
+
n_cls: 12 # Match checkpoint pre-training (12 datasets)
|
| 36 |
+
|
| 37 |
+
# dpot multi-step specific parameters
|
| 38 |
+
in_timesteps: 20
|
| 39 |
+
out_timesteps: 20
|
| 40 |
+
|
| 41 |
+
# training
|
| 42 |
+
is_use_tb: True
|
| 43 |
+
scheduler: "cosine" # step, cosine
|
| 44 |
+
step_size: 1000 # only applicable for step scheduler
|
| 45 |
+
num_update: 2000
|
| 46 |
+
train_batch_size: 16
|
| 47 |
+
test_batch_size: 64
|
| 48 |
+
lr: 0.001
|
| 49 |
+
clip_grad_norm: 1
|
| 50 |
+
|
| 51 |
+
# evaluation
|
| 52 |
+
N_autoregressive: 1
|
| 53 |
+
N_plot: 5
|
| 54 |
+
|
configs/cylinder/fno.yaml
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "fno_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "fno"
|
| 18 |
+
checkpoint_path: ./results/fno/fno_cylinder_numerical_False/2025-09-12_11-17-53/model_3840.pth
|
| 19 |
+
modes1: 4
|
| 20 |
+
modes2: 12
|
| 21 |
+
modes3: 16
|
| 22 |
+
n_layers: 4
|
| 23 |
+
width: 64
|
| 24 |
+
|
| 25 |
+
# training
|
| 26 |
+
is_use_tb: True
|
| 27 |
+
scheduler: cosine # step, cosine
|
| 28 |
+
step_size: 1000 # only applicable for step scheduler
|
| 29 |
+
num_update: 4000
|
| 30 |
+
train_batch_size: 32
|
| 31 |
+
test_batch_size: 64
|
| 32 |
+
lr: 0.0001
|
| 33 |
+
clip_grad_norm: 0.
|
| 34 |
+
|
| 35 |
+
# evaluation
|
| 36 |
+
N_autoregressive: 10
|
| 37 |
+
N_plot: 0
|
| 38 |
+
probe_diagnostic: True
|
| 39 |
+
N_plot_probe: 12
|
configs/cylinder/galerkin_transformer.yaml
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "galerkin_transformer_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.5
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "galerkin_transformer"
|
| 18 |
+
checkpoint_path: ./results/galerkin_transformer/galerkin_transformer_cylinder_real_False/2025-09-15_10-50-53/model_2200.pth
|
| 19 |
+
|
| 20 |
+
pos_dim: 1 # pos dim
|
| 21 |
+
n_hidden: 256
|
| 22 |
+
num_feat_layers: 0
|
| 23 |
+
num_encoder_layers: 1
|
| 24 |
+
n_head: 4
|
| 25 |
+
dim_feedforward: 256
|
| 26 |
+
feat_extract_type: null
|
| 27 |
+
attention_type: galerkin
|
| 28 |
+
xavier_init: 0.01
|
| 29 |
+
diagonal_weight: 0.01
|
| 30 |
+
symmetric_init: False
|
| 31 |
+
layer_norm: False
|
| 32 |
+
attn_norm: True
|
| 33 |
+
norm_eps: 0.0000001
|
| 34 |
+
batch_norm: False
|
| 35 |
+
return_attn_weight: False
|
| 36 |
+
return_latent: False
|
| 37 |
+
decoder_type: ifft2
|
| 38 |
+
spacial_dim: 3
|
| 39 |
+
spacial_fc: True
|
| 40 |
+
upsample_mode: interp
|
| 41 |
+
downsample_mode: interp
|
| 42 |
+
freq_dim: 128
|
| 43 |
+
boundary_condition: None
|
| 44 |
+
num_regressor_layers: 1
|
| 45 |
+
fourier_modes_x: 16
|
| 46 |
+
fourier_modes_y: 20
|
| 47 |
+
fourier_modes_t: 4
|
| 48 |
+
regressor_activation: silu
|
| 49 |
+
downscaler_activation: relu
|
| 50 |
+
upscaler_activation: silu
|
| 51 |
+
last_activation: True
|
| 52 |
+
dropout: 0.0
|
| 53 |
+
downscaler_dropout: 0.05
|
| 54 |
+
upscaler_dropout: 0.0
|
| 55 |
+
ffn_dropout: 0.05
|
| 56 |
+
encoder_dropout: 0.05
|
| 57 |
+
decoder_dropout: 0
|
| 58 |
+
debug: False
|
| 59 |
+
|
| 60 |
+
# training
|
| 61 |
+
is_use_tb: True
|
| 62 |
+
scheduler: cosine # step, cosine
|
| 63 |
+
step_size: 1000 # only applicable for step scheduler
|
| 64 |
+
num_update: 5000
|
| 65 |
+
train_batch_size: 16
|
| 66 |
+
test_batch_size: 16
|
| 67 |
+
lr: 0.01
|
| 68 |
+
clip_grad_norm: 0.
|
| 69 |
+
|
| 70 |
+
# evaluation
|
| 71 |
+
N_autoregressive: 1
|
| 72 |
+
N_plot: 1
|
configs/cylinder/mwt.yaml
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
exp_name: "mwt_cylinder"
|
| 4 |
+
gpu: 0
|
| 5 |
+
seed: 0
|
| 6 |
+
results_path: "./results/"
|
| 7 |
+
|
| 8 |
+
# data
|
| 9 |
+
dataset_name: "cylinder"
|
| 10 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 11 |
+
num_workers: 12
|
| 12 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 13 |
+
|
| 14 |
+
# data parameters for training
|
| 15 |
+
mask_prob: 0.1
|
| 16 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# model
|
| 20 |
+
model_name: "mwt"
|
| 21 |
+
checkpoint_path: "./results/mwt/mwt_cylinder_numerical_False/2025-09-16_07-49-46/model_4200.pth"
|
| 22 |
+
|
| 23 |
+
k: 3 # wavelet filter size
|
| 24 |
+
alpha: 5 # fourier filter parameter, should be less or equal to T/2
|
| 25 |
+
c: 4 # increase factor
|
| 26 |
+
nCZ: 4 # number of CZ blocks
|
| 27 |
+
L: 0
|
| 28 |
+
base: "legendre" # chebyshev
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# training
|
| 32 |
+
is_use_tb: True
|
| 33 |
+
scheduler: cosine # step, cosine
|
| 34 |
+
step_size: 1000 # only applicable for step scheduler
|
| 35 |
+
num_update: 5000
|
| 36 |
+
test_interval: 200
|
| 37 |
+
train_batch_size: 32
|
| 38 |
+
test_batch_size: 64
|
| 39 |
+
lr: 0.001
|
| 40 |
+
clip_grad_norm: 0.
|
| 41 |
+
|
| 42 |
+
# evaluation
|
| 43 |
+
N_autoregressive: 3
|
| 44 |
+
N_plot: 5
|
configs/cylinder/trainsolver.yaml
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "transolver_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "transolver"
|
| 18 |
+
space_dim: 3
|
| 19 |
+
n_layers: 1
|
| 20 |
+
n_hidden: 256
|
| 21 |
+
n_head: 8
|
| 22 |
+
H: 128
|
| 23 |
+
W: 64
|
| 24 |
+
D: 20
|
| 25 |
+
fun_dim: 0 # 1
|
| 26 |
+
out_dim: 3
|
| 27 |
+
ref: 4
|
| 28 |
+
dropout: 0.1
|
| 29 |
+
act: "gelu"
|
| 30 |
+
mlp_ratio: 4
|
| 31 |
+
slice_num: 16
|
| 32 |
+
|
| 33 |
+
checkpoint_path: "./results/transolver/transolver_cylinder_real_True/2025-09-14_11-11-47/model_0800.pth"
|
| 34 |
+
# training
|
| 35 |
+
is_use_tb: True
|
| 36 |
+
scheduler: cosine # step, cosine
|
| 37 |
+
step_size: 1000 # only applicable for step scheduler
|
| 38 |
+
num_update: 5000
|
| 39 |
+
test_interval: 200
|
| 40 |
+
train_batch_size: 16
|
| 41 |
+
test_batch_size: 16
|
| 42 |
+
lr: 0.0007
|
| 43 |
+
clip_grad_norm: 0.
|
| 44 |
+
|
| 45 |
+
# evaluation
|
| 46 |
+
N_autoregressive: 3
|
| 47 |
+
N_plot: 5
|
configs/cylinder/unet.yaml
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "unet_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian"
|
| 11 |
+
|
| 12 |
+
# model
|
| 13 |
+
model_name: "unet"
|
| 14 |
+
is_resume: False
|
| 15 |
+
checkpoint_path: './results/unet/unet_cylinder_numerical_False/2025-08-27_00-44-01/model_6400.pth' # for resume training
|
| 16 |
+
dim_mults: [1, 2, 4]
|
| 17 |
+
mask_prob: 0.5
|
| 18 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 19 |
+
|
| 20 |
+
# training
|
| 21 |
+
is_use_tb: True
|
| 22 |
+
epochs: 200
|
| 23 |
+
step_size: 50
|
| 24 |
+
gamma: 0.5
|
| 25 |
+
train_batch_size: 12
|
| 26 |
+
test_batch_size: 12
|
| 27 |
+
lr: 0.0001
|
| 28 |
+
clip_grad_norm: 0.
|
| 29 |
+
scheduler: cosine # step, cosine
|
| 30 |
+
num_update: 10000
|
| 31 |
+
|
| 32 |
+
# evaluation
|
| 33 |
+
N_plot: 1
|
| 34 |
+
N_autoregressive: 5
|
| 35 |
+
probe_diagnostic: True
|
| 36 |
+
N_plot_probe: 12
|
configs/cylinder/wdno.yaml
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "wdno_cylinder"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "cylinder"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "none" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "wdno"
|
| 18 |
+
checkpoint_path: ./results/wdno/wdno_cylinder_numerical_False/2025-09-17_06-05-39/model_60000.pth
|
| 19 |
+
dim: 256
|
| 20 |
+
dim_mults: [1, 2]
|
| 21 |
+
wave_type: "bior1.1"
|
| 22 |
+
pad_mode: "zero"
|
| 23 |
+
beta_schedule: "sigmoid"
|
| 24 |
+
|
| 25 |
+
# training
|
| 26 |
+
is_use_tb: True
|
| 27 |
+
scheduler: cosine # step, cosine
|
| 28 |
+
step_size: 0 # only applicable for step scheduler
|
| 29 |
+
num_update: 120000
|
| 30 |
+
train_batch_size: 16
|
| 31 |
+
test_batch_size: 64
|
| 32 |
+
lr: 0.00005
|
| 33 |
+
clip_grad_norm: 1.0
|
| 34 |
+
|
| 35 |
+
# sampling
|
| 36 |
+
sampling_timesteps: 10 # number of sampling timesteps (ddim for faster inference)
|
| 37 |
+
ddim_sampling_eta: 1
|
| 38 |
+
|
| 39 |
+
# evaluation
|
| 40 |
+
N_autoregressive: 5
|
| 41 |
+
N_plot: 0
|
| 42 |
+
probe_diagnostic: True
|
| 43 |
+
N_plot_probe: 0
|
configs/foil/cno.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
exp_name: "cno_foil"
|
| 4 |
+
gpu: 0
|
| 5 |
+
seed: 0
|
| 6 |
+
results_path: "./results/"
|
| 7 |
+
|
| 8 |
+
# data
|
| 9 |
+
dataset_name: "foil"
|
| 10 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 11 |
+
num_workers: 12
|
| 12 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 13 |
+
|
| 14 |
+
# data parameters for training
|
| 15 |
+
mask_prob: 0.1
|
| 16 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 17 |
+
|
| 18 |
+
# model
|
| 19 |
+
model_name: "cno"
|
| 20 |
+
checkpoint_path: ./results/cno/cno_foil_numerical_False/2025-09-18_05-34-15/model_4900.pth # for resume training
|
| 21 |
+
|
| 22 |
+
N_layers: 3 # Number of (D) or (U) blocks in the network
|
| 23 |
+
N_res: 1 # Number of (R) blocks per level (except the neck)
|
| 24 |
+
N_res_neck: 8 # Number of (R) blocks in the neck
|
| 25 |
+
channel_multiplier: 32 # How the number of channels evolve?
|
| 26 |
+
conv_kernel: 3 # Size of all the kernels
|
| 27 |
+
cutoff_den: 2.0001 # Filter property 1.
|
| 28 |
+
filter_size: 6 # Filter property 2.
|
| 29 |
+
|
| 30 |
+
lrelu_upsampling: 2 # Filter property 3.
|
| 31 |
+
half_width_mult: 0.8 # Filter property 4.
|
| 32 |
+
radial: False # Filter property 5. Is filter radial?
|
| 33 |
+
batch_norm: True # Add BN? We do not add BN in lifting/projection layer
|
| 34 |
+
out_dim: 1 # Target dimension
|
| 35 |
+
out_size: 1 # If out_size is 1, Then out_size = in_size. Else must be int
|
| 36 |
+
expand_input: False # Start with original in_size, or expand it (pad zeros in the spectrum)
|
| 37 |
+
latent_lift_proj_dim: 64 # Intermediate latent dimension in the lifting/projection layer
|
| 38 |
+
add_inv: True # Add invariant block (I) after the intermediate connections?
|
| 39 |
+
activation: 'LeakyReLU' # Activation function can be 'LeakyReLU' or 'lrelu'
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# training
|
| 43 |
+
is_use_tb: True
|
| 44 |
+
scheduler: cosine # step, cosine
|
| 45 |
+
step_size: 1000 # only applicable for step scheduler
|
| 46 |
+
num_update: 5000
|
| 47 |
+
train_batch_size: 16
|
| 48 |
+
test_batch_size: 64
|
| 49 |
+
lr: 0.0003
|
| 50 |
+
clip_grad_norm: 0.
|
| 51 |
+
|
| 52 |
+
# evaluation
|
| 53 |
+
N_autoregressive: 1
|
| 54 |
+
N_plot: 1
|
configs/foil/deeponet.yaml
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: deeponet_foil
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 1234567
|
| 4 |
+
results_path: ./results/
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: foil
|
| 8 |
+
dataset_root: /wutailin/real_benchmark/
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: gaussian # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: deeponet
|
| 18 |
+
checkpoint_path: ./results/deeponet/deeponet_foil_numerical_False/2025-09-19_01-39-51/model_0160.pth
|
| 19 |
+
|
| 20 |
+
p: 256
|
| 21 |
+
dropout_rate: 0
|
| 22 |
+
|
| 23 |
+
# training
|
| 24 |
+
is_use_tb: true
|
| 25 |
+
scheduler: cosine # step, cosine
|
| 26 |
+
step_size: 1000 # only applicable for step scheduler
|
| 27 |
+
num_update: 4000
|
| 28 |
+
train_batch_size: 32
|
| 29 |
+
test_batch_size: 64
|
| 30 |
+
lr: 0.0001
|
| 31 |
+
clip_grad_norm: 0.
|
| 32 |
+
|
| 33 |
+
# evaluation
|
| 34 |
+
N_autoregressive: 1
|
| 35 |
+
N_plot: 1
|
configs/foil/dmd.yaml
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dmd_foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian"
|
| 11 |
+
|
| 12 |
+
# model
|
| 13 |
+
model_name: "dmd"
|
| 14 |
+
is_resume: False
|
| 15 |
+
n_modes: 10
|
| 16 |
+
n_predict: 20
|
| 17 |
+
mask_prob: 0.5
|
| 18 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 19 |
+
input_feature: 2
|
| 20 |
+
checkpoint_path: 'no model needed'
|
| 21 |
+
test_batch_size: 12
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# evaluation
|
| 25 |
+
N_plot: 1
|
| 26 |
+
N_autoregressive: 1
|
configs/foil/dpot_l.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dpot_l_foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 16
|
| 10 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.5 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "dpot"
|
| 18 |
+
checkpoint_path: "./dpot_ckpts/model_L.pth" # Large DPOT model
|
| 19 |
+
model_type: "dpot" # "dpot" or "dpot3d"
|
| 20 |
+
|
| 21 |
+
# DPOT specific parameters (Large model configuration - from checkpoint)
|
| 22 |
+
img_size: 128 # Pre-trained model resolution (with auto-resize for combustion)
|
| 23 |
+
patch_size: 8
|
| 24 |
+
in_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 25 |
+
out_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 26 |
+
embed_dim: 1536
|
| 27 |
+
depth: 24
|
| 28 |
+
n_blocks: 16
|
| 29 |
+
modes: 32 # Keep default modes for AFNO
|
| 30 |
+
mlp_ratio: 4
|
| 31 |
+
out_layer_dim: 1536 # Match checkpoint out layer dim
|
| 32 |
+
normalize: False
|
| 33 |
+
act: "gelu"
|
| 34 |
+
time_agg: "exp_mlp"
|
| 35 |
+
n_cls: 12 # Match checkpoint pre-training (12 datasets)
|
| 36 |
+
|
| 37 |
+
# dpot multi-step specific parameters (from checkpoint)
|
| 38 |
+
in_timesteps: 20
|
| 39 |
+
out_timesteps: 20
|
| 40 |
+
|
| 41 |
+
# training
|
| 42 |
+
is_use_tb: True
|
| 43 |
+
scheduler: "cosine" # step, cosine
|
| 44 |
+
step_size: 1000 # only applicable for step scheduler
|
| 45 |
+
num_update: 10000
|
| 46 |
+
train_batch_size: 16
|
| 47 |
+
test_batch_size: 64
|
| 48 |
+
lr: 0.0001
|
| 49 |
+
clip_grad_norm: 1
|
| 50 |
+
|
| 51 |
+
# evaluation
|
| 52 |
+
N_autoregressive: 1
|
| 53 |
+
N_plot: 5
|
| 54 |
+
|
configs/foil/dpot_s.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dpot_s_foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 16
|
| 10 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.5 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "dpot"
|
| 18 |
+
checkpoint_path: "./dpot_ckpts/model_S.pth" # Small DPOT model
|
| 19 |
+
model_type: "dpot" # "dpot" or "dpot3d"
|
| 20 |
+
|
| 21 |
+
# DPOT specific parameters (Small model configuration - from checkpoint)
|
| 22 |
+
img_size: 128 # Pre-trained model resolution (with auto-resize for combustion)
|
| 23 |
+
patch_size: 8
|
| 24 |
+
in_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 25 |
+
out_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 26 |
+
embed_dim: 1024
|
| 27 |
+
depth: 6
|
| 28 |
+
n_blocks: 8
|
| 29 |
+
modes: 32 # Keep default modes for AFNO
|
| 30 |
+
mlp_ratio: 1
|
| 31 |
+
out_layer_dim: 32 # Keep default out layer dim
|
| 32 |
+
normalize: False
|
| 33 |
+
act: "gelu"
|
| 34 |
+
time_agg: "exp_mlp"
|
| 35 |
+
n_cls: 12 # Match checkpoint pre-training (12 datasets)
|
| 36 |
+
|
| 37 |
+
# dpot multi-step specific parameters
|
| 38 |
+
in_timesteps: 20
|
| 39 |
+
out_timesteps: 20
|
| 40 |
+
|
| 41 |
+
# training
|
| 42 |
+
is_use_tb: True
|
| 43 |
+
scheduler: "cosine" # step, cosine
|
| 44 |
+
step_size: 1000 # only applicable for step scheduler
|
| 45 |
+
num_update: 10000
|
| 46 |
+
train_batch_size: 16
|
| 47 |
+
test_batch_size: 64
|
| 48 |
+
lr: 0.001
|
| 49 |
+
clip_grad_norm: 1
|
| 50 |
+
|
| 51 |
+
# evaluation
|
| 52 |
+
N_autoregressive: 1
|
| 53 |
+
N_plot: 5
|
| 54 |
+
|
configs/foil/fno.yaml
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "fno_foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "fno"
|
| 18 |
+
checkpoint_path: ./results/fno/fno_foil_numerical_False/2025-09-17_14-33-39/model_3760.pth
|
| 19 |
+
modes1: 4
|
| 20 |
+
modes2: 12
|
| 21 |
+
modes3: 16
|
| 22 |
+
n_layers: 4
|
| 23 |
+
width: 64
|
| 24 |
+
|
| 25 |
+
# training
|
| 26 |
+
is_use_tb: True
|
| 27 |
+
scheduler: cosine # step, cosine
|
| 28 |
+
step_size: 1000 # only applicable for step scheduler
|
| 29 |
+
num_update: 4000
|
| 30 |
+
train_batch_size: 32
|
| 31 |
+
test_batch_size: 64
|
| 32 |
+
lr: 0.0001
|
| 33 |
+
clip_grad_norm: 0.
|
| 34 |
+
|
| 35 |
+
# evaluation
|
| 36 |
+
N_autoregressive: 1
|
| 37 |
+
N_plot: 0
|
configs/foil/galerkin_transformer.yaml
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "galerkin_transformer_foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "galerkin_transformer"
|
| 18 |
+
checkpoint_path: ./results/galerkin_transformer/galerkin_transformer_foil_numerical_False/2025-09-17_19-23-35/model_0600.pth
|
| 19 |
+
|
| 20 |
+
# modes1: 4
|
| 21 |
+
# modes2: 12
|
| 22 |
+
# modes3: 16
|
| 23 |
+
# n_layers: 4
|
| 24 |
+
# width: 64
|
| 25 |
+
|
| 26 |
+
pos_dim: 1 # pos dim
|
| 27 |
+
n_hidden: 256
|
| 28 |
+
num_feat_layers: 0
|
| 29 |
+
num_encoder_layers: 1
|
| 30 |
+
n_head: 4
|
| 31 |
+
dim_feedforward: 256
|
| 32 |
+
feat_extract_type: null
|
| 33 |
+
attention_type: galerkin
|
| 34 |
+
xavier_init: 0.01
|
| 35 |
+
diagonal_weight: 0.01
|
| 36 |
+
symmetric_init: False
|
| 37 |
+
layer_norm: False
|
| 38 |
+
attn_norm: True
|
| 39 |
+
norm_eps: 0.0000001
|
| 40 |
+
batch_norm: False
|
| 41 |
+
return_attn_weight: False
|
| 42 |
+
return_latent: False
|
| 43 |
+
decoder_type: ifft2
|
| 44 |
+
spacial_dim: 3
|
| 45 |
+
spacial_fc: True
|
| 46 |
+
upsample_mode: interp
|
| 47 |
+
downsample_mode: interp
|
| 48 |
+
freq_dim: 128
|
| 49 |
+
boundary_condition: null # NOTE(review): bare `None` parses as the string "None"; `null` assumed intended — confirm consumer does not string-compare
|
| 50 |
+
num_regressor_layers: 1
|
| 51 |
+
fourier_modes_x: 12
|
| 52 |
+
fourier_modes_y: 16
|
| 53 |
+
fourier_modes_t: 4
|
| 54 |
+
regressor_activation: silu
|
| 55 |
+
downscaler_activation: relu
|
| 56 |
+
upscaler_activation: silu
|
| 57 |
+
last_activation: True
|
| 58 |
+
dropout: 0.0
|
| 59 |
+
downscaler_dropout: 0.05
|
| 60 |
+
upscaler_dropout: 0.0
|
| 61 |
+
ffn_dropout: 0.05
|
| 62 |
+
encoder_dropout: 0.05
|
| 63 |
+
decoder_dropout: 0
|
| 64 |
+
debug: False
|
| 65 |
+
|
| 66 |
+
# training
|
| 67 |
+
is_use_tb: True
|
| 68 |
+
scheduler: cosine # step, cosine
|
| 69 |
+
step_size: 1000 # only applicable for step scheduler
|
| 70 |
+
num_update: 5000
|
| 71 |
+
train_batch_size: 16
|
| 72 |
+
test_batch_size: 16
|
| 73 |
+
lr: 0.0001
|
| 74 |
+
clip_grad_norm: 0.
|
| 75 |
+
|
| 76 |
+
# evaluation
|
| 77 |
+
N_autoregressive: 1
|
| 78 |
+
N_plot: 1
|
configs/foil/mwt.yaml
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
exp_name: "mwt_foil"
|
| 4 |
+
gpu: 0
|
| 5 |
+
seed: 0
|
| 6 |
+
results_path: "./results/"
|
| 7 |
+
|
| 8 |
+
# data
|
| 9 |
+
dataset_name: "foil"
|
| 10 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 11 |
+
num_workers: 12
|
| 12 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 13 |
+
|
| 14 |
+
# data parameters for training
|
| 15 |
+
mask_prob: 0.1
|
| 16 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# model
|
| 20 |
+
model_name: "mwt"
|
| 21 |
+
checkpoint_path: ./results/mwt/mwt_foil_numerical_False/2025-09-18_08-56-12/model_2000.pth
|
| 22 |
+
|
| 23 |
+
k: 3 # wavelet filter size
|
| 24 |
+
alpha: 5 # fourier filter parameter, should be less or equal to T/2
|
| 25 |
+
c: 4 # increase factor
|
| 26 |
+
nCZ: 4 # number of CZ blocks
|
| 27 |
+
L: 0
|
| 28 |
+
base: "legendre" # chebyshev
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# training
|
| 32 |
+
is_use_tb: True
|
| 33 |
+
scheduler: cosine # step, cosine
|
| 34 |
+
step_size: 1000 # only applicable for step scheduler
|
| 35 |
+
num_update: 5000
|
| 36 |
+
test_interval: 200
|
| 37 |
+
train_batch_size: 32
|
| 38 |
+
test_batch_size: 64
|
| 39 |
+
lr: 0.0005
|
| 40 |
+
clip_grad_norm: 0.
|
| 41 |
+
|
| 42 |
+
# evaluation
|
| 43 |
+
N_autoregressive: 1
|
| 44 |
+
N_plot: 1
|
configs/foil/trainsolver.yaml
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "transolver"
|
| 18 |
+
space_dim: 3
|
| 19 |
+
n_layers: 1
|
| 20 |
+
n_hidden: 256
|
| 21 |
+
n_head: 8
|
| 22 |
+
H: 128
|
| 23 |
+
W: 64
|
| 24 |
+
D: 20
|
| 25 |
+
fun_dim: 0 # 1
|
| 26 |
+
out_dim: 3
|
| 27 |
+
ref: 4
|
| 28 |
+
dropout: 0.1
|
| 29 |
+
act: "gelu"
|
| 30 |
+
mlp_ratio: 4
|
| 31 |
+
slice_num: 16
|
| 32 |
+
|
| 33 |
+
checkpoint_path: ./results/transolver/foil_numerical_False/2025-09-18_01-26-24/model_1100.pth
|
| 34 |
+
# training
|
| 35 |
+
is_use_tb: True
|
| 36 |
+
scheduler: cosine # step, cosine
|
| 37 |
+
step_size: 1000 # only applicable for step scheduler
|
| 38 |
+
num_update: 5000
|
| 39 |
+
test_interval: 200
|
| 40 |
+
train_batch_size: 16
|
| 41 |
+
test_batch_size: 16
|
| 42 |
+
lr: 0.0007
|
| 43 |
+
clip_grad_norm: 0.
|
| 44 |
+
|
| 45 |
+
# evaluation
|
| 46 |
+
N_autoregressive: 1
|
| 47 |
+
N_plot: 1
|
configs/foil/unet.yaml
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "unet_foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian"
|
| 11 |
+
|
| 12 |
+
# model
|
| 13 |
+
model_name: "unet"
|
| 14 |
+
is_resume: False
|
| 15 |
+
checkpoint_path: './results/unet/unet_foil_numerical_False/2025-09-19_08-30-43/model_8800.pth' # for resume training
|
| 16 |
+
dim_mults: [1, 2, 4]
|
| 17 |
+
mask_prob: 0.5
|
| 18 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 19 |
+
|
| 20 |
+
# training
|
| 21 |
+
is_use_tb: True
|
| 22 |
+
epochs: 200
|
| 23 |
+
step_size: 50
|
| 24 |
+
gamma: 0.5
|
| 25 |
+
train_batch_size: 12
|
| 26 |
+
test_batch_size: 12
|
| 27 |
+
lr: 0.0001
|
| 28 |
+
clip_grad_norm: 0.
|
| 29 |
+
scheduler: cosine # step, cosine
|
| 30 |
+
num_update: 10000
|
| 31 |
+
|
| 32 |
+
# evaluation
|
| 33 |
+
N_plot: 1
|
| 34 |
+
N_autoregressive: 1
|
configs/foil/wdno.yaml
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "wdno_foil"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "foil"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "none" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "wdno"
|
| 18 |
+
checkpoint_path: ./results/wdno/wdno_foil_numerical_False/2025-09-17_17-49-59/model_79200.pth
|
| 19 |
+
dim: 256
|
| 20 |
+
dim_mults: [1, 2]
|
| 21 |
+
wave_type: bior1.3
|
| 22 |
+
pad_mode: "zero"
|
| 23 |
+
beta_schedule: "sigmoid"
|
| 24 |
+
|
| 25 |
+
# training
|
| 26 |
+
is_use_tb: True
|
| 27 |
+
scheduler: cosine # step, cosine
|
| 28 |
+
step_size: 0 # only applicable for step scheduler
|
| 29 |
+
num_update: 120000
|
| 30 |
+
train_batch_size: 16
|
| 31 |
+
test_batch_size: 64
|
| 32 |
+
lr: 0.00005
|
| 33 |
+
clip_grad_norm: 1.0
|
| 34 |
+
|
| 35 |
+
# sampling
|
| 36 |
+
sampling_timesteps: 20 # number of sampling timesteps (ddim for faster inference)
|
| 37 |
+
ddim_sampling_eta: 1.
|
| 38 |
+
|
| 39 |
+
# evaluation
|
| 40 |
+
N_autoregressive: 1
|
| 41 |
+
N_plot: 0
|
configs/fsi/cno.yaml
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
exp_name: "cno_fsi"
|
| 4 |
+
gpu: 0
|
| 5 |
+
seed: 0
|
| 6 |
+
results_path: "./results/"
|
| 7 |
+
|
| 8 |
+
# data
|
| 9 |
+
dataset_name: "fsi"
|
| 10 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 11 |
+
num_workers: 12
|
| 12 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 13 |
+
|
| 14 |
+
# data parameters for training
|
| 15 |
+
mask_prob: 0.5
|
| 16 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 17 |
+
|
| 18 |
+
# model
|
| 19 |
+
model_name: "cno"
|
| 20 |
+
checkpoint_path: ./results/cno/cno_fsi_numerical_False/numerical1/model_4300.pth # for resume training
|
| 21 |
+
N_layers: 3 # Number of (D) or (U) blocks in the network
|
| 22 |
+
|
| 23 |
+
N_res: 1 # Number of (R) blocks per level (except the neck)
|
| 24 |
+
N_res_neck: 8 # Number of (R) blocks in the neck
|
| 25 |
+
channel_multiplier: 32 # How the number of channels evolve?
|
| 26 |
+
conv_kernel: 3 # Size of all the kernels
|
| 27 |
+
cutoff_den: 2.0001 # Filter property 1.
|
| 28 |
+
filter_size: 6 # Filter property 2.
|
| 29 |
+
lrelu_upsampling: 2 # Filter property 3.
|
| 30 |
+
half_width_mult: 0.8 # Filter property 4.
|
| 31 |
+
radial: False # Filter property 5. Is filter radial?
|
| 32 |
+
batch_norm: True # Add BN? We do not add BN in lifting/projection layer
|
| 33 |
+
out_dim: 1 # Target dimension
|
| 34 |
+
out_size: 1 # If out_size is 1, Then out_size = in_size. Else must be int
|
| 35 |
+
expand_input: False # Start with original in_size, or expand it (pad zeros in the spectrum)
|
| 36 |
+
latent_lift_proj_dim: 64 # Intermediate latent dimension in the lifting/projection layer
|
| 37 |
+
add_inv: True # Add invariant block (I) after the intermediate connections?
|
| 38 |
+
activation: 'LeakyReLU' # Activation function can be 'LeakyReLU' or 'lrelu'
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# training
|
| 42 |
+
is_use_tb: True
|
| 43 |
+
scheduler: cosine # step, cosine
|
| 44 |
+
step_size: 1000 # only applicable for step scheduler
|
| 45 |
+
num_update: 5000
|
| 46 |
+
train_batch_size: 32
|
| 47 |
+
test_batch_size: 64
|
| 48 |
+
lr: 0.001
|
| 49 |
+
clip_grad_norm: 0.
|
| 50 |
+
|
| 51 |
+
# evaluation
|
| 52 |
+
N_autoregressive: 3
|
| 53 |
+
N_plot: 1
|
configs/fsi/deeponet.yaml
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "deeponet_fsi"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "fsi"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 12
|
| 10 |
+
normalizer: "gaussian" # none, gaussian, range
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.5
|
| 14 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# model
|
| 18 |
+
model_name: "deeponet"
|
| 19 |
+
checkpoint_path: ./results/deeponet/deeponet_fsi_numerical_False/2025-08-28_04-21-47/model_3100.pth
|
| 20 |
+
p: 64
|
| 21 |
+
dropout_rate: 0.1
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# training
|
| 26 |
+
is_use_tb: True
|
| 27 |
+
scheduler: cosine # step, cosine
|
| 28 |
+
step_size: 1000 # only applicable for step scheduler
|
| 29 |
+
num_update: 5000
|
| 30 |
+
train_batch_size: 64
|
| 31 |
+
test_batch_size: 64
|
| 32 |
+
lr: 0.001
|
| 33 |
+
clip_grad_norm: 0.
|
| 34 |
+
|
| 35 |
+
# evaluation
|
| 36 |
+
N_autoregressive: 1
|
| 37 |
+
N_plot: 1
|
configs/fsi/dmd.yaml
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dmd_fsi"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "fsi"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 10
|
| 10 |
+
normalizer: "gaussian"
|
| 11 |
+
|
| 12 |
+
# model
|
| 13 |
+
model_name: "dmd"
|
| 14 |
+
is_resume: False
|
| 15 |
+
n_modes: 10
|
| 16 |
+
n_predict: 20
|
| 17 |
+
mask_prob: 0.5
|
| 18 |
+
noise_scale: 0.1 # only applicable for numerical data
|
| 19 |
+
input_feature: 2
|
| 20 |
+
checkpoint_path: 'no model needed'
|
| 21 |
+
test_batch_size: 12
|
| 22 |
+
|
| 23 |
+
# evaluation
|
| 24 |
+
N_plot: 1
|
| 25 |
+
N_autoregressive: 1
|
configs/fsi/dpot_l.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
exp_name: "dpot_l_fsi"
|
| 2 |
+
gpu: 0
|
| 3 |
+
seed: 0
|
| 4 |
+
results_path: "./results/"
|
| 5 |
+
|
| 6 |
+
# data
|
| 7 |
+
dataset_name: "fsi"
|
| 8 |
+
dataset_root: "/wutailin/real_benchmark/"
|
| 9 |
+
num_workers: 16
|
| 10 |
+
normalizer: "gaussian" # gaussian, range, none
|
| 11 |
+
|
| 12 |
+
# data parameters for training
|
| 13 |
+
mask_prob: 0.1
|
| 14 |
+
noise_scale: 0.5 # only applicable for numerical data
|
| 15 |
+
|
| 16 |
+
# model
|
| 17 |
+
model_name: "dpot"
|
| 18 |
+
checkpoint_path: "./dpot_ckpts/model_L.pth" # Large DPOT model
|
| 19 |
+
model_type: "dpot" # "dpot" or "dpot3d"
|
| 20 |
+
|
| 21 |
+
# DPOT specific parameters (Large model configuration - from checkpoint)
|
| 22 |
+
img_size: 128 # Pre-trained model resolution (with auto-resize for combustion)
|
| 23 |
+
patch_size: 8
|
| 24 |
+
in_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 25 |
+
out_channels: 4 # We follow the original DPOT workflow to pad the channels to 4, if channels is less than 4
|
| 26 |
+
embed_dim: 1536
|
| 27 |
+
depth: 24
|
| 28 |
+
n_blocks: 16
|
| 29 |
+
modes: 32 # Keep default modes for AFNO
|
| 30 |
+
mlp_ratio: 4
|
| 31 |
+
out_layer_dim: 1536 # Match checkpoint out layer dim
|
| 32 |
+
normalize: False
|
| 33 |
+
act: "gelu"
|
| 34 |
+
time_agg: "exp_mlp"
|
| 35 |
+
n_cls: 12 # Match checkpoint pre-training (12 datasets)
|
| 36 |
+
|
| 37 |
+
# dpot multi-step specific parameters (from checkpoint)
|
| 38 |
+
in_timesteps: 20
|
| 39 |
+
out_timesteps: 20
|
| 40 |
+
|
| 41 |
+
# training
|
| 42 |
+
is_use_tb: True
|
| 43 |
+
scheduler: "cosine" # step, cosine
|
| 44 |
+
step_size: 1000 # only applicable for step scheduler
|
| 45 |
+
num_update: 10000
|
| 46 |
+
train_batch_size: 16
|
| 47 |
+
test_batch_size: 64
|
| 48 |
+
lr: 0.0001
|
| 49 |
+
clip_grad_norm: 1
|
| 50 |
+
|
| 51 |
+
# evaluation
|
| 52 |
+
N_autoregressive: 1
|
| 53 |
+
N_plot: 5
|
| 54 |
+
|