{
  "architectures": [
    "FIMMJP"
  ],
  "dtype": "float32",
  "initial_distribution_decoder": {
    "dropout": 0.1,
    "hidden_act": {
      "name": "torch.nn.SELU"
    },
    "hidden_layers": [
      128,
      128
    ],
    "initialization_scheme": "lecun_normal",
    "name": "fim.models.blocks.base.MLP"
  },
  "intensity_matrix_decoder": {
    "dropout": 0.1,
    "hidden_act": {
      "name": "torch.nn.SELU"
    },
    "hidden_layers": [
      128,
      128
    ],
    "initialization_scheme": "lecun_normal",
    "name": "fim.models.blocks.base.MLP"
  },
  "model_type": "fimmjp",
  "n_states": 6,
  "path_attention": {
    "embed_dim": 256,
    "n_heads": 4,
    "n_queries": 16,
    "name": "fim.models.blocks.MultiHeadLearnableQueryAttention",
    "output_projection": true
  },
  "pos_encodings": {
    "name": "fim.models.blocks.positional_encodings.SineTimeEncoding",
    "out_features": 256
  },
  "transformers_version": "4.57.1",
  "ts_encoder": {
    "encoder_layer": {
      "batch_first": true,
      "d_model": 256,
      "dim_feedforward": 1024,
      "dropout": 0.1,
      "name": "torch.nn.TransformerEncoderLayer",
      "nhead": 4
    },
    "name": "torch.nn.TransformerEncoder",
    "num_layers": 4
  },
  "use_adjacency_matrix": false,
  "use_num_of_paths": true
}