{
"_class_name": "QwenImageTransformer2DModel",
"_diffusers_version": "0.37.0.dev0",
"_name_or_path": "/home/user/.cache/huggingface/hub/models--FireRedTeam--FireRed-Image-Edit-1.0/snapshots/0aea9d520c801c9f4b691cc92f736a7a8628e6a2/transformer",
"attention_head_dim": 128,
"axes_dims_rope": [
16,
56,
56
],
"guidance_embeds": false,
"in_channels": 64,
"joint_attention_dim": 3584,
"num_attention_heads": 24,
"num_layers": 60,
"out_channels": 16,
"patch_size": 2,
"quantization_config": {
"config_groups": {
"group_0": {
"format": "float-quantized",
"input_activations": {
"actorder": null,
"block_structure": null,
"dynamic": true,
"group_size": null,
"num_bits": 8,
"observer": null,
"observer_kwargs": {},
"scale_dtype": null,
"strategy": "token",
"symmetric": true,
"type": "float",
"zp_dtype": null
},
"output_activations": null,
"targets": [
"Linear"
],
"weights": {
"actorder": null,
"block_structure": null,
"dynamic": false,
"group_size": null,
"num_bits": 8,
"observer": "memoryless_minmax",
"observer_kwargs": {},
"scale_dtype": null,
"strategy": "channel",
"symmetric": true,
"type": "float",
"zp_dtype": null
}
}
},
"format": "float-quantized",
"global_compression_ratio": null,
"ignore": [
"proj_out"
],
"kv_cache_scheme": null,
"quant_method": "compressed-tensors",
"quantization_status": "compressed",
"sparsity_config": {},
"transform_config": {},
"version": "0.13.1.a20260219"
},
"use_additional_t_cond": false,
"use_layer3d_rope": false,
"zero_cond_t": false
}