{
"metadata": {
"total_size": 245508488
},
"weight_map": {
"embed.weight": "model.safetensors",
"fm_decoder.downsample.1.bias": "model.safetensors",
"fm_decoder.downsample.2.bias": "model.safetensors",
"fm_decoder.downsample.3.bias": "model.safetensors",
"fm_decoder.guidance_scale_embed.weight": "model.safetensors",
"fm_decoder.in_proj.bias": "model.safetensors",
"fm_decoder.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.0.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.0.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.0.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.0.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.0.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.0.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.norm.bias": "model.safetensors",
"fm_decoder.layers.0.norm.log_scale": "model.safetensors",
"fm_decoder.layers.0.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.0.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.0.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.0.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.0.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.1.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.1.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.1.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.1.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.1.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.1.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.1.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.norm.bias": "model.safetensors",
"fm_decoder.layers.1.norm.log_scale": "model.safetensors",
"fm_decoder.layers.1.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.1.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.1.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.1.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.1.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.10.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.10.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.10.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.10.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.10.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.10.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.10.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.norm.bias": "model.safetensors",
"fm_decoder.layers.10.norm.log_scale": "model.safetensors",
"fm_decoder.layers.10.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.10.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.10.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.10.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.10.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.11.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.11.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.11.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.11.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.11.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.11.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.11.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.norm.bias": "model.safetensors",
"fm_decoder.layers.11.norm.log_scale": "model.safetensors",
"fm_decoder.layers.11.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.11.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.11.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.11.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.11.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.12.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.12.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.12.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.12.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.12.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.12.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.12.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.norm.bias": "model.safetensors",
"fm_decoder.layers.12.norm.log_scale": "model.safetensors",
"fm_decoder.layers.12.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.12.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.12.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.12.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.12.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.13.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.13.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.13.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.13.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.13.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.13.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.13.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.norm.bias": "model.safetensors",
"fm_decoder.layers.13.norm.log_scale": "model.safetensors",
"fm_decoder.layers.13.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.13.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.13.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.13.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.13.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.14.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.14.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.14.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.14.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.14.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.14.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.14.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.norm.bias": "model.safetensors",
"fm_decoder.layers.14.norm.log_scale": "model.safetensors",
"fm_decoder.layers.14.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.14.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.14.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.14.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.14.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.15.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.15.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.15.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.15.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.15.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.15.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.15.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.norm.bias": "model.safetensors",
"fm_decoder.layers.15.norm.log_scale": "model.safetensors",
"fm_decoder.layers.15.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.15.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.15.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.15.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.15.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.2.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.2.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.2.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.2.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.2.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.2.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.2.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.norm.bias": "model.safetensors",
"fm_decoder.layers.2.norm.log_scale": "model.safetensors",
"fm_decoder.layers.2.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.2.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.2.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.2.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.2.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.3.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.3.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.3.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.3.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.3.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.3.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.3.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.norm.bias": "model.safetensors",
"fm_decoder.layers.3.norm.log_scale": "model.safetensors",
"fm_decoder.layers.3.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.3.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.3.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.3.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.3.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.4.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.4.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.4.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.4.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.4.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.4.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.4.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.norm.bias": "model.safetensors",
"fm_decoder.layers.4.norm.log_scale": "model.safetensors",
"fm_decoder.layers.4.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.4.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.4.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.4.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.4.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.5.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.5.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.5.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.5.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.5.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.5.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.5.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.norm.bias": "model.safetensors",
"fm_decoder.layers.5.norm.log_scale": "model.safetensors",
"fm_decoder.layers.5.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.5.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.5.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.5.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.5.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.6.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.6.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.6.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.6.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.6.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.6.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.6.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.norm.bias": "model.safetensors",
"fm_decoder.layers.6.norm.log_scale": "model.safetensors",
"fm_decoder.layers.6.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.6.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.6.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.6.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.6.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.7.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.7.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.7.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.7.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.7.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.7.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.7.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.norm.bias": "model.safetensors",
"fm_decoder.layers.7.norm.log_scale": "model.safetensors",
"fm_decoder.layers.7.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.7.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.7.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.7.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.7.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.8.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.8.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.8.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.8.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.8.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.8.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.8.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.norm.bias": "model.safetensors",
"fm_decoder.layers.8.norm.log_scale": "model.safetensors",
"fm_decoder.layers.8.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.8.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.8.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.8.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.8.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.layers.9.bypass.bypass_scale": "model.safetensors",
"fm_decoder.layers.9.bypass_mid.bypass_scale": "model.safetensors",
"fm_decoder.layers.9.conv_module1.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.9.conv_module1.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.9.conv_module1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.conv_module1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.conv_module1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.conv_module1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.conv_module2.depthwise_conv.bias": "model.safetensors",
"fm_decoder.layers.9.conv_module2.depthwise_conv.weight": "model.safetensors",
"fm_decoder.layers.9.conv_module2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.conv_module2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.conv_module2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.conv_module2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.feed_forward1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.feed_forward1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.feed_forward1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.feed_forward1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.feed_forward2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.feed_forward2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.feed_forward2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.feed_forward2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.feed_forward3.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.feed_forward3.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.feed_forward3.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.feed_forward3.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.nonlin_attention.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.nonlin_attention.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.nonlin_attention.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.nonlin_attention.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.norm.bias": "model.safetensors",
"fm_decoder.layers.9.norm.log_scale": "model.safetensors",
"fm_decoder.layers.9.self_attn1.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.self_attn1.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.self_attn1.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.self_attn1.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.self_attn2.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.self_attn2.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.self_attn2.out_proj.bias": "model.safetensors",
"fm_decoder.layers.9.self_attn2.out_proj.weight": "model.safetensors",
"fm_decoder.layers.9.self_attn_weights.in_proj.bias": "model.safetensors",
"fm_decoder.layers.9.self_attn_weights.in_proj.weight": "model.safetensors",
"fm_decoder.layers.9.self_attn_weights.linear_pos.weight": "model.safetensors",
"fm_decoder.out_combiner.1.bypass_scale": "model.safetensors",
"fm_decoder.out_combiner.2.bypass_scale": "model.safetensors",
"fm_decoder.out_combiner.3.bypass_scale": "model.safetensors",
"fm_decoder.out_proj.bias": "model.safetensors",
"fm_decoder.out_proj.weight": "model.safetensors",
"fm_decoder.stack_time_emb.0.1.bias": "model.safetensors",
"fm_decoder.stack_time_emb.0.1.weight": "model.safetensors",
"fm_decoder.stack_time_emb.1.1.bias": "model.safetensors",
"fm_decoder.stack_time_emb.1.1.weight": "model.safetensors",
"fm_decoder.stack_time_emb.2.1.bias": "model.safetensors",
"fm_decoder.stack_time_emb.2.1.weight": "model.safetensors",
"fm_decoder.stack_time_emb.3.1.bias": "model.safetensors",
"fm_decoder.stack_time_emb.3.1.weight": "model.safetensors",
"fm_decoder.stack_time_emb.4.1.bias": "model.safetensors",
"fm_decoder.stack_time_emb.4.1.weight": "model.safetensors",
"fm_decoder.time_embed.0.bias": "model.safetensors",
"fm_decoder.time_embed.0.weight": "model.safetensors",
"fm_decoder.time_embed.2.bias": "model.safetensors",
"fm_decoder.time_embed.2.weight": "model.safetensors",
"text_encoder.in_proj.bias": "model.safetensors",
"text_encoder.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.bypass.bypass_scale": "model.safetensors",
"text_encoder.layers.0.bypass_mid.bypass_scale": "model.safetensors",
"text_encoder.layers.0.conv_module1.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.0.conv_module1.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.0.conv_module1.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.conv_module1.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.conv_module1.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.conv_module1.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.conv_module2.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.0.conv_module2.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.0.conv_module2.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.conv_module2.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.conv_module2.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.conv_module2.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.feed_forward1.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.feed_forward1.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.feed_forward1.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.feed_forward1.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.feed_forward2.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.feed_forward2.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.feed_forward2.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.feed_forward2.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.feed_forward3.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.feed_forward3.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.feed_forward3.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.feed_forward3.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.nonlin_attention.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.nonlin_attention.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.nonlin_attention.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.nonlin_attention.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.norm.bias": "model.safetensors",
"text_encoder.layers.0.norm.log_scale": "model.safetensors",
"text_encoder.layers.0.self_attn1.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.self_attn1.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.self_attn1.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.self_attn1.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.self_attn2.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.self_attn2.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.self_attn2.out_proj.bias": "model.safetensors",
"text_encoder.layers.0.self_attn2.out_proj.weight": "model.safetensors",
"text_encoder.layers.0.self_attn_weights.in_proj.bias": "model.safetensors",
"text_encoder.layers.0.self_attn_weights.in_proj.weight": "model.safetensors",
"text_encoder.layers.0.self_attn_weights.linear_pos.weight": "model.safetensors",
"text_encoder.layers.1.bypass.bypass_scale": "model.safetensors",
"text_encoder.layers.1.bypass_mid.bypass_scale": "model.safetensors",
"text_encoder.layers.1.conv_module1.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.1.conv_module1.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.1.conv_module1.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.conv_module1.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.conv_module1.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.conv_module1.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.conv_module2.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.1.conv_module2.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.1.conv_module2.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.conv_module2.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.conv_module2.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.conv_module2.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.feed_forward1.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.feed_forward1.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.feed_forward1.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.feed_forward1.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.feed_forward2.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.feed_forward2.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.feed_forward2.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.feed_forward2.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.feed_forward3.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.feed_forward3.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.feed_forward3.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.feed_forward3.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.nonlin_attention.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.nonlin_attention.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.nonlin_attention.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.nonlin_attention.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.norm.bias": "model.safetensors",
"text_encoder.layers.1.norm.log_scale": "model.safetensors",
"text_encoder.layers.1.self_attn1.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.self_attn1.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.self_attn1.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.self_attn1.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.self_attn2.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.self_attn2.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.self_attn2.out_proj.bias": "model.safetensors",
"text_encoder.layers.1.self_attn2.out_proj.weight": "model.safetensors",
"text_encoder.layers.1.self_attn_weights.in_proj.bias": "model.safetensors",
"text_encoder.layers.1.self_attn_weights.in_proj.weight": "model.safetensors",
"text_encoder.layers.1.self_attn_weights.linear_pos.weight": "model.safetensors",
"text_encoder.layers.2.bypass.bypass_scale": "model.safetensors",
"text_encoder.layers.2.bypass_mid.bypass_scale": "model.safetensors",
"text_encoder.layers.2.conv_module1.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.2.conv_module1.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.2.conv_module1.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.conv_module1.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.conv_module1.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.conv_module1.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.conv_module2.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.2.conv_module2.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.2.conv_module2.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.conv_module2.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.conv_module2.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.conv_module2.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.feed_forward1.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.feed_forward1.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.feed_forward1.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.feed_forward1.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.feed_forward2.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.feed_forward2.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.feed_forward2.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.feed_forward2.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.feed_forward3.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.feed_forward3.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.feed_forward3.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.feed_forward3.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.nonlin_attention.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.nonlin_attention.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.nonlin_attention.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.nonlin_attention.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.norm.bias": "model.safetensors",
"text_encoder.layers.2.norm.log_scale": "model.safetensors",
"text_encoder.layers.2.self_attn1.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.self_attn1.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.self_attn1.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.self_attn1.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.self_attn2.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.self_attn2.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.self_attn2.out_proj.bias": "model.safetensors",
"text_encoder.layers.2.self_attn2.out_proj.weight": "model.safetensors",
"text_encoder.layers.2.self_attn_weights.in_proj.bias": "model.safetensors",
"text_encoder.layers.2.self_attn_weights.in_proj.weight": "model.safetensors",
"text_encoder.layers.2.self_attn_weights.linear_pos.weight": "model.safetensors",
"text_encoder.layers.3.bypass.bypass_scale": "model.safetensors",
"text_encoder.layers.3.bypass_mid.bypass_scale": "model.safetensors",
"text_encoder.layers.3.conv_module1.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.3.conv_module1.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.3.conv_module1.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.conv_module1.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.conv_module1.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.conv_module1.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.conv_module2.depthwise_conv.bias": "model.safetensors",
"text_encoder.layers.3.conv_module2.depthwise_conv.weight": "model.safetensors",
"text_encoder.layers.3.conv_module2.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.conv_module2.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.conv_module2.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.conv_module2.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.feed_forward1.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.feed_forward1.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.feed_forward1.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.feed_forward1.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.feed_forward2.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.feed_forward2.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.feed_forward2.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.feed_forward2.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.feed_forward3.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.feed_forward3.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.feed_forward3.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.feed_forward3.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.nonlin_attention.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.nonlin_attention.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.nonlin_attention.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.nonlin_attention.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.norm.bias": "model.safetensors",
"text_encoder.layers.3.norm.log_scale": "model.safetensors",
"text_encoder.layers.3.self_attn1.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.self_attn1.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.self_attn1.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.self_attn1.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.self_attn2.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.self_attn2.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.self_attn2.out_proj.bias": "model.safetensors",
"text_encoder.layers.3.self_attn2.out_proj.weight": "model.safetensors",
"text_encoder.layers.3.self_attn_weights.in_proj.bias": "model.safetensors",
"text_encoder.layers.3.self_attn_weights.in_proj.weight": "model.safetensors",
"text_encoder.layers.3.self_attn_weights.linear_pos.weight": "model.safetensors",
"text_encoder.out_proj.bias": "model.safetensors",
"text_encoder.out_proj.weight": "model.safetensors"
}
}