SeqTex-Transformer / config.json
{
  "_class_name": "WanT2TexTransformer3DModel",
  "_diffusers_version": "0.33.1",
  "_name_or_path": "/mnt/pfs/share/pretrained_model/.cache/huggingface/hub/models--Wan-AI--Wan2.1-T2V-1.3B-Diffusers/snapshots/0fad780a534b6463e45facd96134c9f345acfa5b/transformer",
  "added_kv_proj_dim": null,
  "attention_head_dim": 128,
  "cross_attn_norm": true,
  "eps": 1e-06,
  "ffn_dim": 8960,
  "freq_dim": 256,
  "image_dim": null,
  "in_channels": 16,
  "num_attention_heads": 12,
  "num_layers": 30,
  "out_channels": 16,
  "patch_size": [
    1,
    2,
    2
  ],
  "qk_norm": "rms_norm_across_heads",
  "rope_max_seq_len": 1024,
  "text_dim": 4096
}
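
The values above (12 attention heads of dimension 128, ffn_dim 8960, 30 layers) are consistent with the Wan2.1-T2V-1.3B transformer referenced in "_name_or_path", re-registered under the WanT2TexTransformer3DModel class. Below is a minimal loading sketch. It assumes the WanT2TexTransformer3DModel class is exposed by the SeqTex codebase with the standard diffusers ModelMixin.from_pretrained interface; the import path and the Hub repo id are assumptions for illustration, not confirmed by this file.

# Minimal loading sketch. Assumptions: the import path below is hypothetical,
# and "yuanze1024/SeqTex-Transformer" is an assumed repo id for this config.
import torch
from seqtex.models import WanT2TexTransformer3DModel  # hypothetical import path

transformer = WanT2TexTransformer3DModel.from_pretrained(
    "yuanze1024/SeqTex-Transformer",  # assumed Hub repo id
    torch_dtype=torch.bfloat16,
)

# Sanity-check the loaded model against the config above.
assert transformer.config.num_layers == 30
assert transformer.config.num_attention_heads == 12
assert transformer.config.attention_head_dim == 128  # hidden size 12 * 128 = 1536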