{
"_class_name": "Transformer3DModel",
"_diffusers_version": "0.33.1",
"activation_fn": "gelu-approximate",
"adaln_norm_type": "layernorm",
"add_motion_score": true,
"attention_bias": true,
"attention_head_dim": 64,
"attention_type": "default",
"caption_channels": null,
"cfg_parallel_group": null,
"class_dropout_prob": 0.1,
"compile_mem_gc_threshold": 60,
"cond_channels": 0,
"condition_type": "inpaint_mask",
"cross_attention_dim": 2048,
"desired_step": false,
"double_self_attention": false,
"dropout": 0.0,
"enable_cached_compile": false,
"enable_cfg_parallel": false,
"gradient_checkpointing": "full-block",
"in_channels": 16,
"norm_elementwise_affine": false,
"norm_eps": 1e-05,
"norm_num_groups": 32,
"norm_type": "ada_norm_zero_nolabel",
"num_attention_heads": 48,
"num_block_chunks": 40,
"num_embeds_ada_norm": null,
"num_latent_frames": 24,
"num_layers": 40,
"only_cross_attention": false,
"out_channels": 16,
"patch_size": 2,
"patch_size_t": 1,
"sample_size": 32,
"sequence_parallel_size": 0,
"task_type": "i2v+++",
"time_embed_type": "rope",
"upcast_attention": false,
"use_depth_cond": true,
"use_depth_loss": false,
"use_navit": false,
"use_rope": true,
"vae_stride": 8
}