michaelarbel committed on
Commit
4cc9059
·
verified ·
1 Parent(s): d63e1ec

Create metadata/config.yaml

Browse files
Files changed (1) hide show
  1. equitabpfn/metadata/config.yaml +215 -0
equitabpfn/metadata/config.yaml ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ prior:
2
+ num_features: 100
3
+ n_samples: 1152
4
+ eval_positions_prop: 0.95
5
+ heterogeneous_batches: false
6
+ multiclass_loss_type: nono
7
+ boolean:
8
+ max_fraction_uninformative: 0.5
9
+ p_uninformative: 0.5
10
+ prior_type: prior_bag
11
+ prior_bag:
12
+ prior_bag_exp_weights_1:
13
+ distribution: uniform
14
+ min: 2.0
15
+ max: 10.0
16
+ mlp:
17
+ add_uninformative_features: false
18
+ pre_sample_causes: true
19
+ sampling: normal
20
+ prior_mlp_scale_weights_sqrt: true
21
+ random_feature_rotation: true
22
+ num_layers:
23
+ distribution: meta_gamma
24
+ max_alpha: 2
25
+ max_scale: 3
26
+ round: true
27
+ lower_bound: 2
28
+ prior_mlp_hidden_dim:
29
+ distribution: meta_gamma
30
+ max_alpha: 3
31
+ max_scale: 100
32
+ round: true
33
+ lower_bound: 4
34
+ prior_mlp_dropout_prob:
35
+ distribution: meta_beta
36
+ scale: 0.6
37
+ min: 0.1
38
+ max: 5.0
39
+ init_std:
40
+ distribution: log_uniform
41
+ min: 0.01
42
+ max: 12
43
+ noise_std:
44
+ distribution: log_uniform
45
+ min: 0.0001
46
+ max: 0.5
47
+ num_causes:
48
+ distribution: meta_gamma
49
+ max_alpha: 3
50
+ max_scale: 7
51
+ round: true
52
+ lower_bound: 2
53
+ is_causal:
54
+ distribution: meta_choice
55
+ choice_values:
56
+ - true
57
+ - false
58
+ pre_sample_weights:
59
+ distribution: meta_choice
60
+ choice_values:
61
+ - true
62
+ - false
63
+ y_is_effect:
64
+ distribution: meta_choice
65
+ choice_values:
66
+ - true
67
+ - false
68
+ prior_mlp_activations:
69
+ distribution: meta_choice
70
+ choice_values:
71
+ - torch.nn.Tanh
72
+ - torch.nn.Identity
73
+ - torch.nn.ReLU
74
+ block_wise_dropout:
75
+ distribution: meta_choice
76
+ choice_values:
77
+ - true
78
+ - false
79
+ sort_features:
80
+ distribution: meta_choice
81
+ choice_values:
82
+ - true
83
+ - false
84
+ in_clique:
85
+ distribution: meta_choice
86
+ choice_values:
87
+ - true
88
+ - false
89
+ gp:
90
+ outputscale:
91
+ distribution: log_uniform
92
+ min: 1.0e-05
93
+ max: 8
94
+ lengthscale:
95
+ distribution: log_uniform
96
+ min: 1.0e-05
97
+ max: 8
98
+ noise:
99
+ distribution: meta_choice
100
+ choice_values:
101
+ - 1.0e-05
102
+ - 0.0001
103
+ - 0.01
104
+ sampling: normal
105
+ classification:
106
+ max_num_classes: 10
107
+ num_classes:
108
+ distribution: uniform_int
109
+ min: 2
110
+ max: 10
111
+ num_features_used:
112
+ distribution: uniform_int
113
+ min: 1
114
+ max: 100
115
+ balanced: false
116
+ output_multiclass_ordered_p: 0.0
117
+ categorical_feature_p: 0.2
118
+ multiclass_max_steps: 10
119
+ multiclass_type: rank
120
+ nan_prob_unknown_reason_reason_prior: 0.5
121
+ nan_prob_a_reason: 0.0
122
+ nan_prob_no_reason: 0.0
123
+ nan_prob_unknown_reason: 0.0
124
+ set_value_to_nan: 0.1
125
+ model:
126
+ decoder:
127
+ name: equitabpfn.models.decoders.KDEDecoder
128
+ kwargs:
129
+ bw: 1.0
130
+ kernel: gaussian
131
+ pointwise_mlp:
132
+ dim_feedforward: 512
133
+ with_layer_norm: true
134
+ layer_norm_eps: 1
135
+ activation: gelu
136
+ dropout: 0.0
137
+ y_encoder:
138
+ name: equitabpfn.models.encoders.EquiOneHotAndLinear
139
+ kwargs:
140
+ num_classes: 10
141
+ bkbn:
142
+ name: equitabpfn.models.equitabpfn.EquiTabPFN
143
+ kwargs:
144
+ emsize: 512
145
+ nlayers: 6
146
+ dropout: 0.0
147
+ nhead: 4
148
+ nhid_factor: 2
149
+ init_method: xavier-uniform
150
+ recompute_attn: true
151
+ pre_norm: false
152
+ efficient_eval_masking: true
153
+ input_normalization: false
154
+ tabpfn_zero_weights: false
155
+ output_features: all_features
156
+ equivariant_encoder: false
157
+ feature_mask_mode: Bq2Bk
158
+ compile_model: true
159
+ decoder_kwarg:
160
+ name: ktabpfn.models.decoders.KDEDecoder
161
+ kwargs:
162
+ bw: 1.0
163
+ kernel: gaussian
164
+ pointwise_mlp:
165
+ dim_feedforward: 512
166
+ with_layer_norm: true
167
+ layer_norm_eps: 1
168
+ activation: gelu
169
+ dropout: 0.0
170
+ logits: true
171
+ seed: 0
172
+ system:
173
+ device: 0
174
+ dtype: 32
175
+ n_samples: 1152
176
+ max_features: 100
177
+ max_num_classes: 10
178
+ data_path: data
179
+ dataloader:
180
+ batch_size: 24
181
+ num_steps: 384
182
+ min_eval_pos: 2
183
+ max_eval_pos: 1000
184
+ training:
185
+ aggregate_k_gradients: 3
186
+ epochs: 1200
187
+ train_mixed_precision: true
188
+ eval_freq: 10
189
+ ckpt_freq: 10
190
+ compile: false
191
+ optimizer:
192
+ name: torch.optim.AdamW
193
+ kwargs:
194
+ lr: 0.0001
195
+ weight_decay: 0.0
196
+ scheduler:
197
+ warmup_epoch: 10
198
+ first:
199
+ name: torch.optim.lr_scheduler.LinearLR
200
+ kwargs:
201
+ start_factor: 1.0e-10
202
+ end_factor: 1
203
+ total_iters: 10
204
+ second:
205
+ name: torch.optim.lr_scheduler.CosineAnnealingLR
206
+ kwargs:
207
+ T_max: 1190
208
+ eta_min: 1.0e-08
209
+ load:
210
+ model_state_path: ''
211
+ load_model_strict: true
212
+ load_existing_cktp: true
213
+ mode:
214
+ eval_mode: true
215
+ train_mode: true