{
  "_class_name": "GlmImageTransformer2DModel",
  "_diffusers_version": "0.37.0.dev0",
  "_name_or_path": "/mnt/disk5/lvl/GLM-Image/transformer",
  "attention_head_dim": 128,
  "condition_dim": 256,
  "in_channels": 16,
  "num_attention_heads": 32,
  "num_layers": 30,
  "out_channels": 16,
  "patch_size": 2,
  "prior_vq_quantizer_codebook_size": 16384,
  "quantization_config": {
    "autoround_version": "0.12.0",
    "batch_size": 1,
    "bits": 4,
    "block_name_to_quantize": "transformer_blocks",
    "data_type": "int",
    "group_size": 128,
    "packing_format": "auto_round:auto_gptq",
    "quant_method": "auto-round",
    "sym": true
  },
  "text_embed_dim": 1472,
  "time_embed_dim": 512
}