{
  "name": "alien_iris_world_model",
  "env": "AlienNoFrameskip-v4",
  "model_type": "iris",
  "metadata": {
    "latent_dim": [1, 4, 1024],
    "two_hot_rews": true,
    "tokens_per_block": 6,
    "num_tokens": 4,
    "tokens_grid_res": 2,
    "token_res": 16384
  },
  "util_folders": {
    "models": "../../src/models"
  },
  "requirements": {
    "-r": "requirements.txt"
  },
  "models": [
    {
      "name": "world_model",
      "framework": null,
      "format": "state_dict",
      "source": {
        "weights_path": "world_model.pt",
        "class_path": "../../src/world_model.py",
        "class_name": "WorldModel",
        "class_args": [
        {
          "vocab_size": 512,
          "act_vocab_size": 18,
          "tokens_per_block": 17,
          "max_blocks": 20,
          "attention": "causal",
          "num_layers": 10,
          "num_heads": 4,
          "embed_dim": 256,
          "embed_pdrop": 0.1,
          "resid_pdrop": 0.1,
          "attn_pdrop": 0.1
        }]
      },
      "signature": {
        "inputs": ["tokens", "past_keys_values"],
        "call_mode": "positional"
      },
      "sub_models":
      [
        {
          "name": "transformer",
          "sub_model_name": "transformer",
          "signature": 
          {
            "inputs": ["sequences", "past_keys_values"],
            "call_mode": "positional"
          }
        }
      ],
      "methods":
      [
      ]
    },
    {
      "name": "tokenizer",
      "framework": null,
      "format": "state_dict",
      "source": {
        "weights_path": "tokenizer.pt",
        "class_path": "../../src/tokenizer.py",
        "class_name": "Tokenizer",
        "class_args": [{
          "vocab_size": 512,
          "embed_dim": 512,
          "encoder": {
            "resolution": 64,
            "in_channels": 3,
            "z_channels": 512,
            "ch": 64,
            "ch_mult": [1, 1, 1, 1, 1],
            "num_res_blocks": 2,
            "attn_resolutions": [8, 16],
            "out_ch": 3,
            "dropout": 0.0
          },
          "decoder": { 
            "resolution": 64,
            "in_channels": 3,
            "z_channels": 512,
            "ch": 64,
            "ch_mult": [1, 1, 1, 1, 1],
            "num_res_blocks": 2,
            "attn_resolutions": [8, 16],
            "out_ch": 3,
            "dropout": 0.0
          }
        }]
      },
      "signature": {
        "inputs": ["x", "should_preprocess", "should_postprocess"],
        "call_mode": "positional"
      },
      "sub_models":
      [
        {
          "name": "embedding",
          "sub_model_name": "embedding",
          "signature": 
          {
            "call_mode": "auto"
          }
        }
      ],
      "methods":
      [
        {
          "name": "encode",
          "method_name": "encode"
        },
        {
          "name": "decode",
          "method_name": "decode"
        }
      ]
    }
  ]
}