{
  "name": "model_2",
  "model_type": "bi-lstm-attn-text",
  "layers": [
    {
      "class_name": "InputLayer",
      "config": {
        "batch_input_shape": [null, 1000],
        "dtype": "float32",
        "sparse": false,
        "ragged": false,
        "name": "Encoder_Inputs"
      },
      "name": "Encoder_Inputs",
      "inbound_nodes": []
    },
    {
      "class_name": "Embedding",
      "config": {
        "name": "Encoder_Embedding",
        "trainable": true,
        "batch_input_shape": [null, 1000],
        "dtype": "float32",
        "input_dim": 1056527,
        "output_dim": 300,
        "embeddings_initializer": {
          "class_name": "RandomUniform",
          "config": {
            "minval": -0.05,
            "maxval": 0.05,
            "seed": null
          }
        },
        "embeddings_regularizer": null,
        "activity_regularizer": null,
        "embeddings_constraint": null,
        "mask_zero": false,
        "input_length": 1000
      },
      "name": "Encoder_Embedding",
      "inbound_nodes": [[["Encoder_Inputs", 0, 0, {}]]]
    },
    {
      "class_name": "Bidirectional",
      "config": {
        "name": "Encoder_BiLSTM_Layer1",
        "trainable": true,
        "dtype": "float32",
        "layer": {
          "class_name": "LSTM",
          "config": {
            "name": "lstm",
            "trainable": true,
            "dtype": "float32",
            "return_sequences": true,
            "return_state": true,
            "go_backwards": false,
            "stateful": false,
            "unroll": false,
            "time_major": false,
            "units": 128,
            "activation": "tanh",
            "recurrent_activation": "sigmoid",
            "use_bias": true,
            "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}},
            "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}},
            "bias_initializer": {"class_name": "Zeros", "config": {}},
            "unit_forget_bias": true,
            "kernel_regularizer": null,
            "recurrent_regularizer": null,
            "bias_regularizer": null,
            "activity_regularizer": null,
            "kernel_constraint": null,
            "recurrent_constraint": null,
            "bias_constraint": null,
            "dropout": 0.2,
            "recurrent_dropout": 0.2,
            "implementation": 1
          }
        },
        "merge_mode": "concat"
      },
      "name": "Encoder_BiLSTM_Layer1",
      "inbound_nodes": [[["Encoder_Embedding", 0, 0, {}]]]
    },
    {
      "class_name": "Bidirectional",
      "config": {
        "name": "Encoder_BiLSTM_Layer2",
        "trainable": true,
        "dtype": "float32",
        "layer": {
          "class_name": "LSTM",
          "config": {
            "name": "lstm_1",
            "trainable": true,
            "dtype": "float32",
            "return_sequences": true,
            "return_state": true,
            "go_backwards": false,
            "stateful": false,
            "unroll": false,
            "time_major": false,
            "units": 128,
            "activation": "tanh",
            "recurrent_activation": "sigmoid",
            "use_bias": true,
            "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}},
            "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}},
            "bias_initializer": {"class_name": "Zeros", "config": {}},
            "unit_forget_bias": true,
            "kernel_regularizer": null,
            "recurrent_regularizer": null,
            "bias_regularizer": null,
            "activity_regularizer": null,
            "kernel_constraint": null,
            "recurrent_constraint": null,
            "bias_constraint": null,
            "dropout": 0.2,
            "recurrent_dropout": 0.2,
            "implementation": 1
          }
        },
        "merge_mode": "concat"
      },
      "name": "Encoder_BiLSTM_Layer2",
      "inbound_nodes": [[["Encoder_BiLSTM_Layer1", 0, 0, {}]]]
    },
    {
      "class_name": "Concatenate",
      "config": {
        "name": "concatenate",
        "trainable": true,
        "dtype": "float32",
        "axis": -1
      },
      "name": "concatenate",
      "inbound_nodes": [[["Encoder_BiLSTM_Layer2", 0, 1, {}], ["Encoder_BiLSTM_Layer2", 0, 3, {}]]]
    },
    {
      "class_name": "Concatenate",
      "config": {
        "name": "concatenate_1",
        "trainable": true,
        "dtype": "float32",
        "axis": -1
      },
      "name": "concatenate_1",
      "inbound_nodes": [[["Encoder_BiLSTM_Layer2", 0, 2, {}], ["Encoder_BiLSTM_Layer2", 0, 4, {}]]]
    }
  ],
  "input_layers": [["Encoder_Inputs", 0, 0]],
  "output_layers": [["Encoder_BiLSTM_Layer2", 0, 0], ["concatenate", 0, 0], ["concatenate_1", 0, 0]]
}