{
  "cfg": {
    "gpt_model": {
      "bias": true,
      "encoder_with_cls_token": true,
      "eps": 1e-06,
      "n_embd": 1536,
      "n_head": 12,
      "n_layer": 23,
      "n_single_layer": 1,
      "rope_theta": 10000,
      "shape_model_embed_dim": 32,
      "shape_model_vocab_size": 16384,
      "text_model_embed_dim": 768,
      "use_pooled_text_embed": false
    },
    "shape_model": {
      "embed_dim": 32,
      "embed_point_feats": false,
      "encoder_cross_attention_levels": [
        0,
        2,
        4,
        8
      ],
      "encoder_with_cls_token": true,
      "eps": 1e-06,
      "num_codes": 16384,
      "num_decoder_latents": 0,
      "num_decoder_layers": 24,
      "num_encoder_latents": 512,
      "num_encoder_layers": 13,
      "num_freqs": 128,
      "num_heads": 12,
      "out_dim": 1,
      "point_feats": 3,
      "width": 768
    },
    "text_model_pretrained_model_name_or_path": "not-lain/cube"
  },
  "clip_config": {
    "_attn_implementation_autoset": true,
    "attention_dropout": 0.0,
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "layer_norm_eps": 1e-05,
    "max_position_embeddings": 77,
    "model_type": "clip_text_model",
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "pad_token_id": 1,
    "projection_dim": 768,
    "torch_dtype": "float32",
    "transformers_version": "4.50.1",
    "vocab_size": 49408
  },
  "config_path": null,
  "gpt_ckpt_path": null,
  "shape_ckpt_path": null
}