{
  "add_skip_keys": false,
  "dequantize_fp32": false,
  "dynamic_loss_threshold": 0.01,
  "group_size": 0,
  "is_integer": true,
  "is_training": false,
  "modules_dtype_dict": {},
  "modules_to_not_convert": [
    ".norm_out",
    ".t_embedder",
    ".context_embedder",
    ".vid_out",
    ".img_in",
    ".txt_out",
    "patch_embed",
    "prediction_coefs",
    "patch_emb",
    "multi_modal_projector",
    "lm_head",
    "time_text_embed",
    ".img_out",
    "model.embed_tokens",
    "norm_out",
    ".proj_out",
    ".emb_in",
    "patch_embedding",
    ".time_embed",
    "embedding_projection",
    "lm_head.weight",
    "correction_coefs",
    ".x_embedder",
    "embed_tokens",
    ".y_embedder",
    ".condition_embedder",
    "proj_out",
    "model.norm",
    ".vid_in",
    ".txt_in",
    "wte",
    ".final_layer",
    ".emb_out"
  ],
  "non_blocking": false,
  "quant_conv": false,
  "quant_method": "sdnq",
  "quantization_device": null,
  "quantized_matmul_dtype": null,
  "return_device": null,
  "sdnq_version": "0.1.3",
  "svd_rank": 32,
  "svd_steps": 8,
  "use_dynamic_quantization": false,
  "use_grad_ckpt": true,
  "use_quantized_matmul": true,
  "use_quantized_matmul_conv": false,
  "use_static_quantization": true,
  "use_stochastic_rounding": false,
  "use_svd": false,
  "weights_dtype": "int8"
}