Transformer config.json from FLUX.1-Kontext-dev fused with a LoRA at 0.5 scale.
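The fusion step can be reproduced with diffusers' standard LoRA utilities (load_lora_weights, fuse_lora, unload_lora_weights). The sketch below is a hypothetical reconstruction, not the exact script used for this upload: the LoRA repo id and output directory are placeholders.

# Hypothetical sketch of producing a LoRA-fused FLUX.1-Kontext-dev transformer.
# The LoRA repo id and output directory are placeholders.
import torch
from diffusers import FluxKontextPipeline

pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
)

# Load a LoRA adapter and merge its weights into the base weights at 0.5 scale.
pipe.load_lora_weights("your-username/your-flux-kontext-lora")  # placeholder repo id
pipe.fuse_lora(lora_scale=0.5)
pipe.unload_lora_weights()  # drop the separate adapter weights after fusing

# Save only the fused transformer; its config.json is the file shown below.
pipe.transformer.save_pretrained("flux-kontext-fused-transformer")  # placeholder path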
{
  "_class_name": "FluxTransformer2DModel",
  "_diffusers_version": "0.36.0.dev0",
  "_name_or_path": "/root/.cache/huggingface/hub/models--black-forest-labs--FLUX.1-Kontext-dev/snapshots/af58063aa431f4d2bbc11ae46f57451d4416a170/transformer",
  "attention_head_dim": 128,
  "axes_dims_rope": [
    16,
    56,
    56
  ],
  "guidance_embeds": true,
  "in_channels": 64,
  "joint_attention_dim": 4096,
  "num_attention_heads": 24,
  "num_layers": 19,
  "num_single_layers": 38,
  "out_channels": null,
  "patch_size": 1,
  "pooled_projection_dim": 768
}
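To use the fused weights, the transformer can be loaded on its own and swapped into a fresh FLUX.1-Kontext-dev pipeline. A minimal sketch, assuming this config.json and the fused weights live in a repo's transformer subfolder; the repo id below is a placeholder.

# Hypothetical sketch of loading the fused transformer into a Kontext pipeline.
import torch
from diffusers import FluxKontextPipeline, FluxTransformer2DModel

# Load the fused transformer from the (placeholder) repo's transformer subfolder.
transformer = FluxTransformer2DModel.from_pretrained(
    "your-username/your-fused-flux-kontext",  # placeholder repo id
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
)

# Reuse the remaining FLUX.1-Kontext-dev components (text encoders, VAE,
# scheduler) and swap in the fused transformer.
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)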