VincentStimper committed on
Commit
d9831b2
·
unverified ·
1 Parent(s): 5a29485

model: added aldp model and config

Browse files
Files changed (2) hide show
  1. aldp/config.yaml +73 -0
  2. aldp/model.pt +3 -0
aldp/config.yaml ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Config file specifying the setup of a Boltzmann Generator
2
+
3
+
4
+ data:
5
+ transform: experiments/aldp/data/position_min_energy.pt
6
+ test: data/val.pt
7
+
8
+
9
+ system: # Properties of molecular system
10
+
11
+ temperature: 300 # Double, temperature of the system
12
+ energy_cut: 1.e+8 # Double, energy level at which regularization shall be applied
13
+ energy_max: 1.e+20 # Double, maximum level at which energies will be clamped
14
+ n_threads: 18 # Int, number of threads to be used, number of cores if null
15
+ transform: internal
16
+ shift_dih: false
17
+ env: implicit
18
+
19
+
20
+ flow: # Properties of the flow model
21
+
22
+ type: circular-coup-nsf # String, type of the flow
23
+ base: # Base distribution
24
+ type: gauss-uni # Type of the base dist
25
+ params: null
26
+ blocks: 12 # Int, number of Real NVP blocks, consisting of an ActNorm layer
27
+ # if specified, a permutation, and an affine coupling layer
28
+ actnorm: false # Bool, flag whether to include ActNorm layers
29
+ mixing: null # String, how features are mixed
30
+ circ_shift: random # String, whether to shift circular coordinates, can be none,
31
+ # constant, or random
32
+ blocks_per_layer: 1 # Int, number of blocks per layer
33
+ hidden_units: 256 # Int, number of hidden units of the NN in neural spline layers
34
+ num_bins: 8 # Int, number of bins of the neural splines
35
+ init_identity: true # Bool, flag whether to initialize layers as identity map
36
+ dropout: 0. # Float, dropout probability for the NN layers
37
+
38
+
39
+ fab:
40
+
41
+ transition_type: hmc # String, type of transition operator used
42
+ n_int_dist: 8 # Int, number of intermediate distributions
43
+ n_inner: 4 # Int, number of steps between intermediate distributions
44
+ epsilon: 0.1
45
+ adjust_step_size: true
46
+ loss_type: fab_alpha_div # String, loss to be used
47
+ alpha: 2.
48
+
49
+
50
+
51
+ training: # Properties of the training procedure
52
+
53
+ max_iter: 50000 # Int, maximum number of iterations
54
+ warmup_iter: 1000
55
+ optimizer: adam # String, name of the optimizer
56
+ batch_size: 1024 # Int, batch size used during training
57
+ learning_rate: 5.e-4 # Double, learning rate used during training
58
+ lr_scheduler:
59
+ type: cosine # String, kind of LR scheduler, can be exponential, cosine
60
+ replay_buffer:
61
+ type: prioritised # String, way to sample from the buffer, can be uniform or prioritised
62
+ n_updates: 8 # Int, number of updates to do after each sampling step
63
+ min_length: 64 # Int, minimum number of batches in replay buffer
64
+ max_length: 512 # Int, maximum number of batches in replay buffer
65
+ max_adjust_w_clip: 10 # Double, fraction of weights to clip per batch
66
+ max_grad_norm: 1.e+3 # Double, limit for gradient clipping
67
+ weight_decay: 1.e-5 # Double, regularization parameter
68
+ log_iter: 5000 # Int, number of iterations after which loss is saved
69
+ checkpoint_iter: 25000 # Int, number of iterations after which checkpoint is saved
70
+ eval_samples: 0 # Int, number of samples to draw when evaluating the model
71
+ filter_chirality: train # String, when to filter for chirality (e.g. train) — NOTE: value is a string, not a bool; confirm allowed values against the consumer
72
+ seed: 0 # Int, seed to be used for the random number generator
73
+ save_root: out/fab_buff
aldp/model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b20e41af15d1a31db8eedaacca6772aa3596d1c9023542531e7aab253f71004
3
+ size 32082631