diff --git a/.gitattributes b/.gitattributes
index 3794e80498d65e401cdc0632b9a0e5bb22d7841c..50ed0410b5d7f5829438e3b27a48b06f77fe8a7a 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1022,3 +1022,5 @@ custom_nodes/mikey_nodes/noise_bw.png filter=lfs diff=lfs merge=lfs -text
models/clipseg/models--CIDAS--clipseg-rd64-refined/blobs/d00ca85d6b859f9d07b7cfb8ef26fe9771cb275b34c9368f2ecf603139307f55 filter=lfs diff=lfs merge=lfs -text
models/prompt_generator/opus-mt-id-en/source.spm filter=lfs diff=lfs merge=lfs -text
models/prompt_generator/opus-mt-id-en/target.spm filter=lfs diff=lfs merge=lfs -text
+models/checkpoints/bopbtf filter=lfs diff=lfs merge=lfs -text
+models/facedetection/shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
diff --git a/models/CogVideo/CogVideoX-5b-I2V/scheduler/scheduler_config.json b/models/CogVideo/CogVideoX-5b-I2V/scheduler/scheduler_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..806684e0f0ccd0fa4eeb90a5ba867fbe67b29877
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/scheduler/scheduler_config.json
@@ -0,0 +1,18 @@
+{
+ "_class_name": "CogVideoXDDIMScheduler",
+ "_diffusers_version": "0.31.0.dev0",
+ "beta_end": 0.012,
+ "beta_schedule": "scaled_linear",
+ "beta_start": 0.00085,
+ "clip_sample": false,
+ "clip_sample_range": 1.0,
+ "num_train_timesteps": 1000,
+ "prediction_type": "v_prediction",
+ "rescale_betas_zero_snr": true,
+ "sample_max_value": 1.0,
+ "set_alpha_to_one": true,
+ "snr_shift_scale": 1.0,
+ "steps_offset": 0,
+ "timestep_spacing": "trailing",
+ "trained_betas": null
+}
diff --git a/models/CogVideo/CogVideoX-5b-I2V/transformer/config.json b/models/CogVideo/CogVideoX-5b-I2V/transformer/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0703c6900bb90c2cc70abcd41fe7101244af8fdf
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/transformer/config.json
@@ -0,0 +1,29 @@
+{
+ "_class_name": "CogVideoXTransformer3DModel",
+ "_diffusers_version": "0.31.0.dev0",
+ "activation_fn": "gelu-approximate",
+ "attention_bias": true,
+ "attention_head_dim": 64,
+ "dropout": 0.0,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 32,
+ "max_text_seq_length": 226,
+ "norm_elementwise_affine": true,
+ "norm_eps": 1e-05,
+ "num_attention_heads": 48,
+ "num_layers": 42,
+ "out_channels": 16,
+ "patch_size": 2,
+ "sample_frames": 49,
+ "sample_height": 60,
+ "sample_width": 90,
+ "spatial_interpolation_scale": 1.875,
+ "temporal_compression_ratio": 4,
+ "temporal_interpolation_scale": 1.0,
+ "text_embed_dim": 4096,
+ "time_embed_dim": 512,
+ "timestep_activation_fn": "silu",
+ "use_learned_positional_embeddings": true,
+ "use_rotary_positional_embeddings": true
+}
diff --git a/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00001-of-00003.safetensors b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..067fbc6270bf96989c5e6220b5763e628d8532d2
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2e3060199c34a0d18892a19d687455f938b0ac3d2ea7d48f37cb4090e141965
+size 4992465072
diff --git a/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00002-of-00003.safetensors b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6a8cc889403c66e35eb25dcdae346669d794e01b
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e8d0c62d366b0d9cc3476d2b21ca54afbecea154d54d923da120b2ec174c7e7
+size 4985800640
diff --git a/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00003-of-00003.safetensors b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3da1541c7237fbc85c014d0e54652a778eadd74b
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da91a0051da3f39caf10944b7c9aa66b14ddeffb37a25b087c49fc1692c1a361
+size 1272025856
diff --git a/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model.safetensors.index.json b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..c7b90be5b1457ac87ec7b7f0c882472e4002c39d
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model.safetensors.index.json
@@ -0,0 +1,1032 @@
+{
+ "metadata": {
+ "total_size": 11250175104
+ },
+ "weight_map": {
+ "norm_final.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "norm_final.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "norm_out.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "norm_out.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "norm_out.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "norm_out.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "patch_embed.pos_embedding": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "patch_embed.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "patch_embed.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "patch_embed.text_proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "patch_embed.text_proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "proj_out.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "proj_out.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "time_embedding.linear_1.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "time_embedding.linear_1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "time_embedding.linear_2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "time_embedding.linear_2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.0.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.1.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.10.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.11.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.12.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.13.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.14.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.15.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.16.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.17.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.18.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.18.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.18.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.18.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.18.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.18.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.18.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.18.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.19.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.2.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.2.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.20.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.20.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.21.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.22.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.23.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.24.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.25.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.26.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.27.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.28.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.29.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.3.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.3.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.30.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.30.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.31.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.32.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.33.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.34.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.35.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.ff.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.ff.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.ff.net.2.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.ff.net.2.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm2.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm2.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm2.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.36.norm2.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.norm_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.norm_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.ff.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.ff.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.ff.net.2.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.ff.net.2.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.norm1.linear.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.norm1.linear.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.norm1.norm.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.norm1.norm.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
+ "transformer_blocks.37.norm2.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.norm2.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.norm2.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.37.norm2.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.norm_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.norm_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.norm_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.norm_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.ff.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.ff.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.ff.net.2.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.ff.net.2.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm1.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm1.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm1.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm1.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm2.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm2.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm2.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.38.norm2.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.norm_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.norm_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.norm_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.norm_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.ff.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.ff.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.ff.net.2.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.ff.net.2.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm1.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm1.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm1.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm1.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm2.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm2.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm2.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.39.norm2.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.4.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.4.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.40.attn1.norm_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.norm_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.norm_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.norm_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.ff.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.ff.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.ff.net.2.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.ff.net.2.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm1.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm1.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm1.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm1.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm2.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm2.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm2.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.40.norm2.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.norm_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.norm_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.norm_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.norm_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_k.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_k.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_q.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_q.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.ff.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.ff.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.ff.net.2.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.ff.net.2.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm1.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm1.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm1.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm1.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm2.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm2.linear.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm2.norm.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.41.norm2.norm.weight": "diffusion_pytorch_model-00003-of-00003.safetensors",
+ "transformer_blocks.5.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.5.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.6.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.7.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.8.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.norm_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.norm_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.ff.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm1.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm1.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm1.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm2.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm2.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm2.norm.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+ "transformer_blocks.9.norm2.norm.weight": "diffusion_pytorch_model-00001-of-00003.safetensors"
+ }
+}
diff --git a/models/CogVideo/CogVideoX-5b-I2V/vae/config.json b/models/CogVideo/CogVideoX-5b-I2V/vae/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8f2badea95227e1c8ce0df5c11920a930ae09829
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/vae/config.json
@@ -0,0 +1,40 @@
+{
+ "_class_name": "AutoencoderKLCogVideoX",
+ "_diffusers_version": "0.32.0.dev0",
+ "act_fn": "silu",
+ "block_out_channels": [
+ 128,
+ 256,
+ 256,
+ 512
+ ],
+ "down_block_types": [
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D"
+ ],
+ "force_upcast": true,
+ "in_channels": 3,
+ "latent_channels": 16,
+ "latents_mean": null,
+ "latents_std": null,
+ "layers_per_block": 3,
+ "norm_eps": 1e-06,
+ "norm_num_groups": 32,
+ "out_channels": 3,
+ "sample_height": 480,
+ "sample_width": 720,
+ "scaling_factor": 0.7,
+ "shift_factor": null,
+ "temporal_compression_ratio": 4,
+ "up_block_types": [
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D"
+ ],
+ "use_post_quant_conv": false,
+ "use_quant_conv": false,
+ "invert_scale_latents": false
+}
diff --git a/models/CogVideo/CogVideoX-5b-I2V/vae/diffusion_pytorch_model.safetensors b/models/CogVideo/CogVideoX-5b-I2V/vae/diffusion_pytorch_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bb977c24bd502a1b34c74b78bc8c524fcc0833aa
--- /dev/null
+++ b/models/CogVideo/CogVideoX-5b-I2V/vae/diffusion_pytorch_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e
+size 862388596
diff --git a/models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc b/models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc
index 04cd5528cd829ab7fd2724eb9fc381b5ab7e5ee9..e584dd333c0531d42905ec970d8c452f12b180e1 100644
Binary files a/models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc and b/models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc differ
diff --git a/models/RMBG/RMBG-2.0/__pycache__/BiRefNet_config.cpython-310.pyc b/models/RMBG/RMBG-2.0/__pycache__/BiRefNet_config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b45431d9fa4d8380c2b4305b8f5c8399b7e7c9c
Binary files /dev/null and b/models/RMBG/RMBG-2.0/__pycache__/BiRefNet_config.cpython-310.pyc differ
diff --git a/models/checkpoints/bopbt/FT_Epoch_latest.pt b/models/checkpoints/bopbt/FT_Epoch_latest.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2b688be71a8803225894e709332ef33f05044fc4
--- /dev/null
+++ b/models/checkpoints/bopbt/FT_Epoch_latest.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d7ab04e9b3885c6b1991bb7a0b823129dd6e3ac078a9fd059ebd2a7ba59a95
+size 451663663
diff --git a/models/checkpoints/bopbt/FaceSR_512/latest_net_G.pth b/models/checkpoints/bopbt/FaceSR_512/latest_net_G.pth
new file mode 100644
index 0000000000000000000000000000000000000000..839a09ae3982fb5deceabfbb47c8436d5797427e
--- /dev/null
+++ b/models/checkpoints/bopbt/FaceSR_512/latest_net_G.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b3dc71f29cc98a61d9a7ccc48f329726feca19b67c2d3710b69b2deb96aef22
+size 368779463
diff --git a/models/checkpoints/bopbt/Setting_9_epoch_100/latest_net_G.pth b/models/checkpoints/bopbt/Setting_9_epoch_100/latest_net_G.pth
new file mode 100644
index 0000000000000000000000000000000000000000..864f66319a04783ef5c94d7053fb919eb5ff6d6f
--- /dev/null
+++ b/models/checkpoints/bopbt/Setting_9_epoch_100/latest_net_G.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaafe424c22225891f60e672f331e14521bef870e6ce490f988ce6ada4068569
+size 368738923
diff --git a/models/checkpoints/bopbt/mapping_Patch_Attention/latest_net_mapping_net.pth b/models/checkpoints/bopbt/mapping_Patch_Attention/latest_net_mapping_net.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a2578262ecae61936d730f08b6efb23d9135a2c4
--- /dev/null
+++ b/models/checkpoints/bopbt/mapping_Patch_Attention/latest_net_mapping_net.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15f7a145b4eb94565d2f5988f6ca9948d04cef1599f4ca6eb99429e7d2c7d783
+size 295645915
diff --git a/models/checkpoints/bopbt/mapping_quality/latest_net_mapping_net.pth b/models/checkpoints/bopbt/mapping_quality/latest_net_mapping_net.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2f0aa49e88623e0865c90f5124520977c8c0d007
--- /dev/null
+++ b/models/checkpoints/bopbt/mapping_quality/latest_net_mapping_net.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb193cc0c82a6f4ed1c746beec5366c8a1e4a1b8dad3c83148afd0f93814624b
+size 144549781
diff --git a/models/checkpoints/bopbt/mapping_scratch/latest_net_mapping_net.pth b/models/checkpoints/bopbt/mapping_scratch/latest_net_mapping_net.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3f4deea4d211c93b51c060444240ed0c41e4aad0
--- /dev/null
+++ b/models/checkpoints/bopbt/mapping_scratch/latest_net_mapping_net.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a63f9e33802a262fa62b160785fa3c53166f3d1449046ad9afc742848e195578
+size 205392667
diff --git a/models/checkpoints/bopbtf b/models/checkpoints/bopbtf
new file mode 100644
index 0000000000000000000000000000000000000000..2f0aa49e88623e0865c90f5124520977c8c0d007
--- /dev/null
+++ b/models/checkpoints/bopbtf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb193cc0c82a6f4ed1c746beec5366c8a1e4a1b8dad3c83148afd0f93814624b
+size 144549781
diff --git a/models/checkpoints/juggernautXL_juggXIByRundiffusion.safetensors b/models/checkpoints/juggernautXL_juggXIByRundiffusion.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9effffa4b57dab861dbead70687ba27483ef4ed8
--- /dev/null
+++ b/models/checkpoints/juggernautXL_juggXIByRundiffusion.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33e58e86686f6b386c526682b5da9228ead4f91d994abd4b053442dc5b42719e
+size 7105350536
diff --git a/models/checkpoints/kantanmix_v10.safetensors b/models/checkpoints/kantanmix_v10.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..740d8465106d29b3c9c2a7c208526d80a28e7870
--- /dev/null
+++ b/models/checkpoints/kantanmix_v10.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6c46570180b4defbf0841cfe4f491f5eaa38f48a61f85f29cf83fdf0dd82ddf
+size 2132626758
diff --git a/models/checkpoints/models/checkpoints/kantanmixSD15_v10.safetensors b/models/checkpoints/models/checkpoints/kantanmixSD15_v10.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2b5a3cacea552bc03a10004c268dcf03af3222fa
--- /dev/null
+++ b/models/checkpoints/models/checkpoints/kantanmixSD15_v10.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97d38c162efd7bd6f9fd3298019360a3dcccbc2d79da9ca095e1052000398af3
+size 505062656
diff --git a/models/checkpoints/tmp_795765.safetensors b/models/checkpoints/tmp_795765.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7c6e9f931465e1d28746b242bde0cc81a5c0cad4
--- /dev/null
+++ b/models/checkpoints/tmp_795765.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a6a04edf5ec290a89838947ca683cf53bceab73b5ab95c30635464c731ce2d8
+size 19263640
diff --git a/models/clip/ViT-L-14-BEST-smooth-GmP-TE-only-HF-format.safetensors b/models/clip/ViT-L-14-BEST-smooth-GmP-TE-only-HF-format.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..61d1c5e253db6c8a04bffff5b48ee2b7a276bcc8
--- /dev/null
+++ b/models/clip/ViT-L-14-BEST-smooth-GmP-TE-only-HF-format.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1bc257ac78ef7cf40c145b4319e759525557609b96820831f6eea2e49da99b5
+size 323409740
diff --git a/models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/refs/main b/models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/refs/main
index 674ef872b67fbeb59f31636d3f372a6a6a84901a..d47b4e9e20b4ba7925e3ddcbed7c61f455b073bc 100644
--- a/models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/refs/main
+++ b/models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/refs/main
@@ -1 +1 @@
-689a4528f64ee8a01e0710a91fa2c70793428860
\ No newline at end of file
+18d0535469bb561bf468d76c1d73aa35156c922b
\ No newline at end of file
diff --git a/models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/snapshots/18d0535469bb561bf468d76c1d73aa35156c922b/open_clip_model.safetensors b/models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/snapshots/18d0535469bb561bf468d76c1d73aa35156c922b/open_clip_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e3db780d09e2ee381c0c08d437cb7652023976e1
--- /dev/null
+++ b/models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/snapshots/18d0535469bb561bf468d76c1d73aa35156c922b/open_clip_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ce2e8a8ebfff3793d7d375ad6d3c35cb9aebf3de7ace0fc7308accab7cd207e
+size 1710517724
diff --git a/models/diffusion_models/flux/STOIQONewrealityFLUXSD_F1DAlpha.safetensors b/models/diffusion_models/flux/STOIQONewrealityFLUXSD_F1DAlpha.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..18183636728c7f8d7b355587c900b4ed4657f623
--- /dev/null
+++ b/models/diffusion_models/flux/STOIQONewrealityFLUXSD_F1DAlpha.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0396ef03b03a2f73ee473127fbc4b15d009ce561e03c4ba3005f92452393d163
+size 11901517328
diff --git a/models/diffusion_models/wan/Phantom-Wan-14B_fp8_e4m3fn.safetensors b/models/diffusion_models/wan/Phantom-Wan-14B_fp8_e4m3fn.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..00cadb5b3576f5ed3b5a34056c950cfc6c613f3f
--- /dev/null
+++ b/models/diffusion_models/wan/Phantom-Wan-14B_fp8_e4m3fn.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:205c2924aadcd4e1312d6aac0b4cfba80eeea33db99419b113c10eec4810cabc
+size 15001320640
diff --git a/models/diffusion_models/wan/aniWan2114BFp8E4m3fn_t2v.safetensors b/models/diffusion_models/wan/aniWan2114BFp8E4m3fn_t2v.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..efec332c80ad60b680f32110a0a64b32b359e137
--- /dev/null
+++ b/models/diffusion_models/wan/aniWan2114BFp8E4m3fn_t2v.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a59b04fc09ef456bbce817875684c05923bdb62fbe11f248f8e037293c04925
+size 14289631824
diff --git a/models/face_parsing/config.json b/models/face_parsing/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ec0bfdcb824a07a97703061444af609eefa5c968
--- /dev/null
+++ b/models/face_parsing/config.json
@@ -0,0 +1,111 @@
+{
+ "_name_or_path": "jonathandinu/face-parsing",
+ "architectures": [
+ "SegformerForSemanticSegmentation"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout_prob": 0.1,
+ "decoder_hidden_size": 768,
+ "depths": [
+ 3,
+ 6,
+ 40,
+ 3
+ ],
+ "downsampling_rates": [
+ 1,
+ 4,
+ 8,
+ 16
+ ],
+ "drop_path_rate": 0.1,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_sizes": [
+ 64,
+ 128,
+ 320,
+ 512
+ ],
+ "id2label": {
+ "0": "background",
+ "1": "skin",
+ "2": "nose",
+ "3": "eye_g",
+ "4": "l_eye",
+ "5": "r_eye",
+ "6": "l_brow",
+ "7": "r_brow",
+ "8": "l_ear",
+ "9": "r_ear",
+ "10": "mouth",
+ "11": "u_lip",
+ "12": "l_lip",
+ "13": "hair",
+ "14": "hat",
+ "15": "ear_r",
+ "16": "neck_l",
+ "17": "neck",
+ "18": "cloth"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "label2id": {
+ "background": 0,
+ "skin": 1,
+ "nose": 2,
+ "eye_g": 3,
+ "l_eye": 4,
+ "r_eye": 5,
+ "l_brow": 6,
+ "r_brow": 7,
+ "l_ear": 8,
+ "r_ear": 9,
+ "mouth": 10,
+ "u_lip": 11,
+ "l_lip": 12,
+ "hair": 13,
+ "hat": 14,
+ "ear_r": 15,
+ "neck_l": 16,
+ "neck": 17,
+ "cloth": 18
+ },
+ "layer_norm_eps": 1e-06,
+ "mlp_ratios": [
+ 4,
+ 4,
+ 4,
+ 4
+ ],
+ "model_type": "segformer",
+ "num_attention_heads": [
+ 1,
+ 2,
+ 5,
+ 8
+ ],
+ "num_channels": 3,
+ "num_encoder_blocks": 4,
+ "patch_sizes": [
+ 7,
+ 3,
+ 3,
+ 3
+ ],
+ "reshape_last_stage": true,
+ "semantic_loss_ignore_index": 255,
+ "sr_ratios": [
+ 8,
+ 4,
+ 2,
+ 1
+ ],
+ "strides": [
+ 4,
+ 2,
+ 2,
+ 2
+ ],
+ "transformers_version": "4.37.0.dev0"
+}
diff --git a/models/face_parsing/model.safetensors b/models/face_parsing/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1e30381f99433a6a415b0c2816579b9557e71a48
--- /dev/null
+++ b/models/face_parsing/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2bec795a8c243db71bd95be538fd62559003566466c71237e45c99b920f4b62
+size 338580732
diff --git a/models/face_parsing/preprocessor_config.json b/models/face_parsing/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..89faa86b52097b90ef95c2cc85eb6c298a24a57e
--- /dev/null
+++ b/models/face_parsing/preprocessor_config.json
@@ -0,0 +1,23 @@
+{
+ "do_normalize": true,
+ "do_reduce_labels": false,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.485,
+ 0.456,
+ 0.406
+ ],
+ "image_processor_type": "SegformerFeatureExtractor",
+ "image_std": [
+ 0.229,
+ 0.224,
+ 0.225
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 512,
+ "width": 512
+ }
+}
diff --git a/models/facedetection/shape_predictor_68_face_landmarks.dat b/models/facedetection/shape_predictor_68_face_landmarks.dat
new file mode 100644
index 0000000000000000000000000000000000000000..1e5da4f9a556bec8582e6c55b89b3e6bfdd60021
--- /dev/null
+++ b/models/facedetection/shape_predictor_68_face_landmarks.dat
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
+size 99693937
diff --git a/models/float/.gitattributes b/models/float/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b
--- /dev/null
+++ b/models/float/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/models/float/float.pth b/models/float/float.pth
new file mode 100644
index 0000000000000000000000000000000000000000..28fc51844fd40b749254ab25342981c98d8eb347
--- /dev/null
+++ b/models/float/float.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5cd815510908284eca7c5b9e81bdd63fc4cbef761f83bfb8fbe5e2880b2b830
+size 826739645
diff --git a/models/float/wav2vec-english-speech-emotion-recognition/.gitattributes b/models/float/wav2vec-english-speech-emotion-recognition/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..637fa167e56685c01bc97f08a420ea76330cf6df
--- /dev/null
+++ b/models/float/wav2vec-english-speech-emotion-recognition/.gitattributes
@@ -0,0 +1,32 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/models/float/wav2vec-english-speech-emotion-recognition/README.md b/models/float/wav2vec-english-speech-emotion-recognition/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..15f1937b9a01a4360800a6d78cc6bf6891e6bddf
--- /dev/null
+++ b/models/float/wav2vec-english-speech-emotion-recognition/README.md
@@ -0,0 +1,86 @@
+---
+license: apache-2.0
+tags:
+- generated_from_trainer
+metrics:
+- accuracy
+model_index:
+ name: wav2vec-english-speech-emotion-recognition
+---
+# Speech Emotion Recognition By Fine-Tuning Wav2Vec 2.0
+The model is a fine-tuned version of [jonatasgrosman/wav2vec2-large-xlsr-53-english](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-english) for a Speech Emotion Recognition (SER) task.
+
+Several datasets were used to fine-tune the original model:
+- Surrey Audio-Visual Expressed Emotion [(SAVEE)](http://kahlan.eps.surrey.ac.uk/savee/Database.html) - 480 audio files from 4 male actors
+- Ryerson Audio-Visual Database of Emotional Speech and Song [(RAVDESS)](https://zenodo.org/record/1188976) - 1440 audio files from 24 professional actors (12 female, 12 male)
+- Toronto emotional speech set [(TESS)](https://tspace.library.utoronto.ca/handle/1807/24487) - 2800 audio files from 2 female actors
+
+7 labels/emotions were used as classification labels
+```python
+emotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+```
+It achieves the following results on the evaluation set:
+- Loss: 0.104075
+- Accuracy: 0.97463
+
+## Model Usage
+```bash
+pip install transformers librosa torch
+```
+```python
+from transformers import *
+import librosa
+import torch
+
+feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("r-f/wav2vec-english-speech-emotion-recognition")
+model = Wav2Vec2ForCTC.from_pretrained("r-f/wav2vec-english-speech-emotion-recognition")
+
+def predict_emotion(audio_path):
+ audio, rate = librosa.load(audio_path, sr=16000)
+ inputs = feature_extractor(audio, sampling_rate=rate, return_tensors="pt", padding=True)
+
+ with torch.no_grad():
+ outputs = model(inputs.input_values)
+ predictions = torch.nn.functional.softmax(outputs.logits.mean(dim=1), dim=-1) # Average over sequence length
+ predicted_label = torch.argmax(predictions, dim=-1)
+ emotion = model.config.id2label[predicted_label.item()]
+ return emotion
+
+emotion = predict_emotion("example_audio.wav")
+print(f"Predicted emotion: {emotion}")
+>> Predicted emotion: angry
+```
+
+
+## Training procedure
+### Training hyperparameters
+The following hyperparameters were used during training:
+- learning_rate: 0.0001
+- train_batch_size: 4
+- eval_batch_size: 4
+- eval_steps: 500
+- seed: 42
+- gradient_accumulation_steps: 2
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- num_epochs: 4
+- max_steps=7500
+- save_steps: 1500
+
+### Training results
+| Step | Training Loss | Validation Loss | Accuracy |
+| ---- | ------------- | --------------- | -------- |
+| 500 | 1.8124 | 1.365212 | 0.486258 |
+| 1000 | 0.8872 | 0.773145 | 0.79704 |
+| 1500 | 0.7035 | 0.574954 | 0.852008 |
+| 2000 | 0.6879 | 1.286738 | 0.775899 |
+| 2500 | 0.6498 | 0.697455 | 0.832981 |
+| 3000 | 0.5696 | 0.33724 | 0.892178 |
+| 3500 | 0.4218 | 0.307072 | 0.911205 |
+| 4000 | 0.3088 | 0.374443 | 0.930233 |
+| 4500 | 0.2688 | 0.260444 | 0.936575 |
+| 5000 | 0.2973 | 0.302985 | 0.92389 |
+| 5500 | 0.1765 | 0.165439 | 0.961945 |
+| 6000 | 0.1475 | 0.170199 | 0.961945 |
+| 6500 | 0.1274 | 0.15531 | 0.966173 |
+| 7000 | 0.0699 | 0.103882 | 0.976744 |
+| 7500 | 0.083 | 0.104075 | 0.97463 |
\ No newline at end of file
diff --git a/models/float/wav2vec-english-speech-emotion-recognition/config.json b/models/float/wav2vec-english-speech-emotion-recognition/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3eaea64b5698087ad6ff7366381068b33a58de6d
--- /dev/null
+++ b/models/float/wav2vec-english-speech-emotion-recognition/config.json
@@ -0,0 +1,137 @@
+{
+ "_name_or_path": "jonatasgrosman/wav2vec2-large-xlsr-53-english",
+ "processor_class": "Wav2Vec2CTCTokenizer",
+ "activation_dropout": 0.05,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": true,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": true,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.05,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "finetuning_task": "wav2vec2_clf",
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.05,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "angry",
+ "1": "disgust",
+ "2": "fear",
+ "3": "happy",
+ "4": "neutral",
+ "5": "sad",
+ "6": "surprise"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "angry": 0,
+ "disgust": 1,
+ "fear": 2,
+ "happy": 3,
+ "neutral": 4,
+ "sad": 5,
+ "surprise": 6
+ },
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.05,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.05,
+ "mask_time_selection": "static",
+ "model_type": "wav2vec2",
+ "num_adapter_layers": 3,
+ "num_attention_heads": 16,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "output_hidden_size": 1024,
+ "pad_token_id": 0,
+ "pooling_mode": "mean",
+ "problem_type": "single_label_classification",
+ "proj_codevector_dim": 256,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.22.1",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 33,
+ "xvector_output_dim": 512
+}
diff --git a/models/float/wav2vec-english-speech-emotion-recognition/preprocessor_config.json b/models/float/wav2vec-english-speech-emotion-recognition/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f99bcabcbeaf80e6791d79c9cb6cd68c6e7ae95
--- /dev/null
+++ b/models/float/wav2vec-english-speech-emotion-recognition/preprocessor_config.json
@@ -0,0 +1,10 @@
+{
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "processor_class": "Wav2Vec2ProcessorWithLM",
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+}
diff --git a/models/float/wav2vec-english-speech-emotion-recognition/pytorch_model.bin b/models/float/wav2vec-english-speech-emotion-recognition/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..bbe88c9dc2f4995533d38d452e100c6dabdef6ed
--- /dev/null
+++ b/models/float/wav2vec-english-speech-emotion-recognition/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6470434ecf20ae93b22284ac83877984fb8765e332037c36a54df6607e3a206
+size 1266126445
diff --git a/models/float/wav2vec-english-speech-emotion-recognition/training_args.bin b/models/float/wav2vec-english-speech-emotion-recognition/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..7385d9f4c22face37b8f9e6533670bd657309483
--- /dev/null
+++ b/models/float/wav2vec-english-speech-emotion-recognition/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b7a4b18e6dd098bbeba86991ea3a66623c19570bf00ab392b2b8e7e72ee8598
+size 3439
diff --git a/models/float/wav2vec2-base-960h/.gitattributes b/models/float/wav2vec2-base-960h/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..cf6d51fc9b1a671c35e92d6bd009880937aaa12d
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/.gitattributes
@@ -0,0 +1,18 @@
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
diff --git a/models/float/wav2vec2-base-960h/README.md b/models/float/wav2vec2-base-960h/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7fe2047d7ac9b9816848c657b2a492ee95b264b
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/README.md
@@ -0,0 +1,128 @@
+---
+language: en
+datasets:
+- librispeech_asr
+tags:
+- audio
+- automatic-speech-recognition
+- hf-asr-leaderboard
+license: apache-2.0
+widget:
+- example_title: Librispeech sample 1
+ src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
+- example_title: Librispeech sample 2
+ src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
+model-index:
+- name: wav2vec2-base-960h
+ results:
+ - task:
+ name: Automatic Speech Recognition
+ type: automatic-speech-recognition
+ dataset:
+ name: LibriSpeech (clean)
+ type: librispeech_asr
+ config: clean
+ split: test
+ args:
+ language: en
+ metrics:
+ - name: Test WER
+ type: wer
+ value: 3.4
+ - task:
+ name: Automatic Speech Recognition
+ type: automatic-speech-recognition
+ dataset:
+ name: LibriSpeech (other)
+ type: librispeech_asr
+ config: other
+ split: test
+ args:
+ language: en
+ metrics:
+ - name: Test WER
+ type: wer
+ value: 8.6
+---
+
+# Wav2Vec2-Base-960h
+
+[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)
+
+The base model pretrained and fine-tuned on 960 hours of Librispeech on 16kHz sampled speech audio. When using the model
+make sure that your speech input is also sampled at 16kHz.
+
+[Paper](https://arxiv.org/abs/2006.11477)
+
+Authors: Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli
+
+**Abstract**
+
+We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.
+
+The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.
+
+
+# Usage
+
+To transcribe audio files the model can be used as a standalone acoustic model as follows:
+
+```python
+ from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
+ from datasets import load_dataset
+ import torch
+
+ # load model and tokenizer
+ processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+ model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
+
+ # load dummy dataset and read soundfiles
+ ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
+
+ # tokenize
+ input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values # Batch size 1
+
+ # retrieve logits
+ logits = model(input_values).logits
+
+ # take argmax and decode
+ predicted_ids = torch.argmax(logits, dim=-1)
+ transcription = processor.batch_decode(predicted_ids)
+ ```
+
+ ## Evaluation
+
+ This code snippet shows how to evaluate **facebook/wav2vec2-base-960h** on LibriSpeech's "clean" and "other" test data.
+
+```python
+from datasets import load_dataset
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+import torch
+from jiwer import wer
+
+
+librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
+
+model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+
+def map_to_pred(batch):
+ input_values = processor(batch["audio"]["array"], return_tensors="pt", padding="longest").input_values
+ with torch.no_grad():
+ logits = model(input_values.to("cuda")).logits
+
+ predicted_ids = torch.argmax(logits, dim=-1)
+ transcription = processor.batch_decode(predicted_ids)
+ batch["transcription"] = transcription
+ return batch
+
+result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["audio"])
+
+print("WER:", wer(result["text"], result["transcription"]))
+```
+
+*Result (WER)*:
+
+| "clean" | "other" |
+|---|---|
+| 3.4 | 8.6 |
\ No newline at end of file
diff --git a/models/float/wav2vec2-base-960h/config.json b/models/float/wav2vec2-base-960h/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8ca9cc7496e145e37d09cec17d0c3bf9b8523c8e
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/config.json
@@ -0,0 +1,77 @@
+{
+ "_name_or_path": "facebook/wav2vec2-base-960h",
+ "activation_dropout": 0.1,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 1,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "sum",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.1,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.1,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.1,
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.1,
+ "mask_feature_length": 10,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_prob": 0.05,
+ "model_type": "wav2vec2",
+ "num_attention_heads": 12,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "pad_token_id": 0,
+ "proj_codevector_dim": 256,
+ "transformers_version": "4.7.0.dev0",
+ "vocab_size": 32
+}
diff --git a/models/float/wav2vec2-base-960h/feature_extractor_config.json b/models/float/wav2vec2-base-960h/feature_extractor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..52fdd74dc06f40033506e402269fbde5e7adc21d
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/feature_extractor_config.json
@@ -0,0 +1,8 @@
+{
+ "do_normalize": true,
+ "feature_dim": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+}
diff --git a/models/float/wav2vec2-base-960h/model.safetensors b/models/float/wav2vec2-base-960h/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..942562678fb28df86c055027c18216fa2a7cb5dd
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aa76ab2243c81747a1f832954586bc566090c83a0ac167df6f31f0fa917d74a
+size 377607901
diff --git a/models/float/wav2vec2-base-960h/preprocessor_config.json b/models/float/wav2vec2-base-960h/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3f24dc078fcba55ee1d417a413847ead40c093a3
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/preprocessor_config.json
@@ -0,0 +1,8 @@
+{
+ "do_normalize": true,
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+}
diff --git a/models/float/wav2vec2-base-960h/pytorch_model.bin b/models/float/wav2vec2-base-960h/pytorch_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d630db45384aa007f54a9a1b37da83c5a208f4cf
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c34f9827b034a1b9141dbf6f652f8a60eda61cdf5771c9e05bfa99033c92cd96
+size 377667514
diff --git a/models/float/wav2vec2-base-960h/special_tokens_map.json b/models/float/wav2vec2-base-960h/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..25bc39604f72700b3b8e10bd69bb2f227157edd1
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/special_tokens_map.json
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
\ No newline at end of file
diff --git a/models/float/wav2vec2-base-960h/tf_model.h5 b/models/float/wav2vec2-base-960h/tf_model.h5
new file mode 100644
index 0000000000000000000000000000000000000000..e6d1d69dc1ac70461fd5754f00e9d1d9626bd400
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/tf_model.h5
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:412742825972a6e2e877255ccd8b3416e618df15a7f1e5e4f736aa3632ce33b5
+size 377840624
diff --git a/models/float/wav2vec2-base-960h/tokenizer_config.json b/models/float/wav2vec2-base-960h/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..978a15a96dbb2d23e2afbc70137cae6c5ce38c8d
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/tokenizer_config.json
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "return_attention_mask": false, "do_normalize": true}
\ No newline at end of file
diff --git a/models/float/wav2vec2-base-960h/vocab.json b/models/float/wav2vec2-base-960h/vocab.json
new file mode 100644
index 0000000000000000000000000000000000000000..88181b954aa14df68be9b444b3c36585f3078c0a
--- /dev/null
+++ b/models/float/wav2vec2-base-960h/vocab.json
@@ -0,0 +1 @@
+{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}
\ No newline at end of file
diff --git a/models/loras/dreamo_quality_lora_neg.safetensors b/models/loras/dreamo_quality_lora_neg.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7d535ee0caed6f691e0b8cb9639a47b9e14c76da
--- /dev/null
+++ b/models/loras/dreamo_quality_lora_neg.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce781be640575caed2ee2b39a62cda825ec9e7033f21fe9f3764763a6389588
+size 478193864
diff --git a/models/loras/dreamo_quality_lora_pos.safetensors b/models/loras/dreamo_quality_lora_pos.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..92e4336b6031affa55f5f8ff8565475ca36b38cd
--- /dev/null
+++ b/models/loras/dreamo_quality_lora_pos.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27ce065fcb23b7cc0ed390fa4c9bdebd6e3af373badfd58ec4a8888d312f8900
+size 478193864
diff --git a/models/loras/tmp_1031573.safetensors b/models/loras/tmp_1031573.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bf3d99f173813e7dd8963470f7ff23e96c712ffa
--- /dev/null
+++ b/models/loras/tmp_1031573.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce751043a6239d5167c798d44d7e1fb70ad3685a6e550dd7de3c4f57d1c1b6f1
+size 19284600
diff --git a/models/loras/tmp_1135830.safetensors b/models/loras/tmp_1135830.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bcd30961ec9d6ac4a64398a5921fb00a42dae21e
--- /dev/null
+++ b/models/loras/tmp_1135830.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64af3707384ee18c1488670c366cc319bc50fec12b72badf8518f604a0075daa
+size 171969408
diff --git a/models/loras/tmp_1171370.safetensors b/models/loras/tmp_1171370.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9382a77e09e141b3c392207179df4d0acb16cef1
--- /dev/null
+++ b/models/loras/tmp_1171370.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6184fd5b3947be5e2913bf2e99a24002ebbf3758605673f8ce780d55907a0102
+size 228461188
diff --git a/models/loras/tmp_1192204.safetensors b/models/loras/tmp_1192204.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b3217e9a8021428b602f264b85382f1135d7ada2
--- /dev/null
+++ b/models/loras/tmp_1192204.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca562cad3be3c9ae3e44cbe54ae6e51ae43279b007ad4efbc722f9ac272dbdbc
+size 76692376
diff --git a/models/loras/tmp_1247042.safetensors b/models/loras/tmp_1247042.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a10697e3b777acdcc47277b42306f940d82266b9
--- /dev/null
+++ b/models/loras/tmp_1247042.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba79d5302bec55bbd0fd68ce3f0b84731803460590969554b26551c170deff2c
+size 469900672
diff --git a/models/loras/tmp_1264100.safetensors b/models/loras/tmp_1264100.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..301c08e6f1b291d4d5c9d758214a4aa04decb5b3
--- /dev/null
+++ b/models/loras/tmp_1264100.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7beb4ab869ee7a4f4f6bba9d9b3ab62ceb2e842ebb52be09bd860598969d5db4
+size 171969416
diff --git a/models/loras/tmp_1311584.safetensors b/models/loras/tmp_1311584.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4b7d651bfa452c0d0ce7cc6a52ad12f734076ed9
--- /dev/null
+++ b/models/loras/tmp_1311584.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa370e5eea2ef44e63f62660fe1655a42620ccbf330b808a1ecfddbb4f5d51a8
+size 171969424
diff --git a/models/loras/tmp_1352742.safetensors b/models/loras/tmp_1352742.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c20f974b527b069d5659c9bad026f8946e83b9b5
--- /dev/null
+++ b/models/loras/tmp_1352742.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3726fded83c9e9799c8f7e7eb0c01e3689fdd3734ab0f20a8cde295afd7d9c0
+size 228460668
diff --git a/models/loras/tmp_1398703.safetensors b/models/loras/tmp_1398703.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9db97097f59693a8d1bc8350acea68232be0a2fb
--- /dev/null
+++ b/models/loras/tmp_1398703.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7b5c187bd701455167161c7c4576e85d780402318b853766104542a7a71ec5d
+size 202732756
diff --git a/models/loras/tmp_1550434.safetensors b/models/loras/tmp_1550434.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ac0dbed9001fad63d9af7508a4ae9c9fe095886e
--- /dev/null
+++ b/models/loras/tmp_1550434.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3745bf10b92e50c2e2ce52ec641c17fd0f2521a6fc53289c4831fbed8a1bb6de
+size 38411192
diff --git a/models/loras/tmp_1552087.safetensors b/models/loras/tmp_1552087.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..712c2f9bcf6a99c6ed7fa40a7b19f2dbefdb24ae
--- /dev/null
+++ b/models/loras/tmp_1552087.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:374b9e35641cf591e0ad30fae49b1295c2edc5e8d4e9dcaf1f704c30502e473c
+size 916175032
diff --git a/models/loras/tmp_1640450.safetensors b/models/loras/tmp_1640450.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1e34b778a6bad1aa4fc3bff1bc5af601c3921eee
--- /dev/null
+++ b/models/loras/tmp_1640450.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad958931a6f0e0fdff4262aafd941ea9576cebde203fa5c66f5a0c42d0a5cae
+size 153268427
diff --git a/models/loras/tmp_1697082.safetensors b/models/loras/tmp_1697082.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c8793bf38c690be6a702e59113517e2a03281260
--- /dev/null
+++ b/models/loras/tmp_1697082.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ece21c3526a6985f564e589df99a1452ed8c3c10068a1d14553a8a2877e6d0b
+size 6938040456
diff --git a/models/loras/tmp_1700320.safetensors b/models/loras/tmp_1700320.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..087e1fadeb3f934f54e569daaadcb85aea06d101
--- /dev/null
+++ b/models/loras/tmp_1700320.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e54976a767f3d61713d54784de095b3a76c3bd2879d20fa6896a33550302fa7b
+size 228460204
diff --git a/models/loras/tmp_1755780.safetensors b/models/loras/tmp_1755780.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..64238ae1bcea6b310802ccfb043bf4cd1d144b4f
--- /dev/null
+++ b/models/loras/tmp_1755780.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad18966832939c6d11cee11fae64faef44dcf053078ea8e749f8fadb879d4003
+size 153268427
diff --git a/models/loras/tmp_1810235.safetensors b/models/loras/tmp_1810235.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b443fc92d8d1fe4b5c8cc8d57b92ca15777ae33b
--- /dev/null
+++ b/models/loras/tmp_1810235.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccefa8aa0e39b8fe5d7a17b662b26bfab2b71e39d0c23aa0e0bd545939e6bdbf
+size 88008084
diff --git a/models/loras/tmp_1815766.safetensors b/models/loras/tmp_1815766.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4770695c6e0ecae8a169754aaeb633eddd554e1b
--- /dev/null
+++ b/models/loras/tmp_1815766.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5fccf082654a6f388f7fff4cbf654e88bb6e4a843b654c78a3971e03049c5
+size 19299768
diff --git a/models/loras/tmp_436124.safetensors b/models/loras/tmp_436124.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d0611b1ca22430e55dbe3c0889fbba54f4196c26
--- /dev/null
+++ b/models/loras/tmp_436124.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51ec39755561348f85a59cdba1790a8d6774a17ec012ea9f6058752d432547f7
+size 37868560
diff --git a/models/loras/tmp_707763.safetensors b/models/loras/tmp_707763.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..be9187256e9494061681697b54fc7ebdc928fd5b
--- /dev/null
+++ b/models/loras/tmp_707763.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88c93dffe46e9fae0d69e0db3da131101230451a76dd55ea3a637584f4a08df5
+size 912552748
diff --git a/models/loras/tmp_733194.safetensors b/models/loras/tmp_733194.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6a068453bf4b0e4aa99e9d672b0889754f9b3479
--- /dev/null
+++ b/models/loras/tmp_733194.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54d9acab71df7d3d6bc13966be4f8f05215a7bdb77e0756df1ca9c54bbbda03f
+size 19265728
diff --git a/models/loras/tmp_753053.safetensors b/models/loras/tmp_753053.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..528f36fb55fc691681651178c558a87ce4ee0069
--- /dev/null
+++ b/models/loras/tmp_753053.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c12dbc58856651aa6bfa214ddfca07c812d9e32fb36d803258632ac207825ae1
+size 19264840
diff --git a/models/loras/tmp_771009.safetensors b/models/loras/tmp_771009.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fcf037a4a9d66907a8b6e73364bc7ca1e8bd811c
--- /dev/null
+++ b/models/loras/tmp_771009.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f823b8a8d4bfb63fb660acd52021e4b4f62074a8b71d0254877d4433324c46ac
+size 19270112
diff --git a/models/loras/tmp_954444.safetensors b/models/loras/tmp_954444.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f89a4b7c7af4ffa7c4b8b6deb83631f758197fef
--- /dev/null
+++ b/models/loras/tmp_954444.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08040dff65fa05715f011b23363c09c4c1699cf5fe1754816ee763ddc15e2444
+size 171969436
diff --git a/models/loras/tmp_967187.safetensors b/models/loras/tmp_967187.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c3e7c64cfe16cd5e2f426f2f4f3e94ee8873dc2e
--- /dev/null
+++ b/models/loras/tmp_967187.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0d027bc56ce1410fa916689e2228589b1013fa1e4c14174da3cb734d280b53d
+size 38423176
diff --git a/models/loras/tmp_968854.safetensors b/models/loras/tmp_968854.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5fcfec0123d33cd415118197c2de31b52eb5c4fb
--- /dev/null
+++ b/models/loras/tmp_968854.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c493e519ff310beff2a81787f1ad0f032979d7967dbed9f483855c70dbc00c7
+size 202690484
diff --git a/models/loras/tmp_980197.safetensors b/models/loras/tmp_980197.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..906da69efa16ca3d56d93ad0c0b1df3026a19443
--- /dev/null
+++ b/models/loras/tmp_980197.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:736231a83ea492f8427aa01f542836df7257c59764cb9b4ed6181226be33cc61
+size 19259808
diff --git a/models/loras/tmp_993999.safetensors b/models/loras/tmp_993999.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..717e81730f25cad4d95705219e4cc883e2c4b662
--- /dev/null
+++ b/models/loras/tmp_993999.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a485ec548e540ef663eb5e8c6c0af49af4519005247d14bd022d2f5a7e21ce1
+size 306428682
diff --git a/models/rembg/briarmbg.pth b/models/rembg/briarmbg.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5a35fd28a4f04bc3a38135dec168c918632d6e8c
--- /dev/null
+++ b/models/rembg/briarmbg.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:893c16c340b1ddafc93e78457a4d94190da9b7179149f8574284c83caebf5e8c
+size 176718373
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1231959.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1231959.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e2787d91b73d490ec07fce635dc2ca079fc15853
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1231959.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a288764258642a00433bfb83579a11b9759311cb23243077231d248058f740e
+size 161300448
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1330061.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1330061.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0a303f55c97f2b84a5c51607d502e8f93147c0f5
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1330061.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e610cd7c4b04005b0a67d1ceee3323f629b765ce1fe7c8275bd9d01d5cc83ce4
+size 322523816
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1397524.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1397524.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..12dabb6bc28fafba75e03f4da486c0fbdeb84fb0
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1397524.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ebd61716876a5138deb1049f5eb4a6e000d00b2b47fa05c09a589ba7722feef
+size 180930896
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1423378.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1423378.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e2938a9c919fcf3905da5ed5a4a4b3989b9df21c
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1423378.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1c8273225196ae1054f9d9bf4ff295e06e21be9733332a4de5f40eb3a5880dc
+size 180930896
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1620582.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1620582.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4f586beb35dea4ca9d87bc5f267d1f4fa0dd4e5f
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1620582.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5481578d5d26d17a6c106e3436bb9b6535e63f944c976dec0cf5f5a2b68bac8a
+size 322519480
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1627510.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1627510.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6708cfcf8660633f97c11a1bb39d896e5a5a302e
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1627510.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a72ed919d05cb1662105e46b6ed547a8ee02d99707cdc83ab527dc8e92f808f5
+size 322519480
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1781658.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1781658.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e9946b216a08eb51562e72cb596eebe6e399fbe0
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1781658.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05ed80615393a9cfd5be9d968d3621bdcff0886571ce4a87bb6cc3a25b371962
+size 236019040
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1808720.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1808720.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0b8a4884e29e305e04b43aa0fc5c5bda56a0886a
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1808720.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f22ea7b61630695733a9c9df30393c91acc87e1b72ec9fb32551acb7e022fed
+size 322519480
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1826659.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1826659.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..164cf5781ab369d85d69ae964aa876d764953f1f
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1826659.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6d5b8a901d688facbce3a8f696d4961007cb71bbdf0f3105328ea38ed3e2013
+size 40384504
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1839643.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1839643.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9ffa32dadae12692779b7973983c6395253de995
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1839643.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb80aa2fb85a1c30ecb9df9b291d856ef0c4ede28ea4f23abbedf7f5cf133790
+size 322519480
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1839803.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1839803.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1ce12623c191aac9ac688b9afdca32624faf2db2
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1839803.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abded56694ab67634d4194e7727798ef382bd9435bcf5ff2e09a8ad9435221ca
+size 322519480
diff --git a/models/tmp_hunyuan_loras/tmp_civit_1845780.safetensors b/models/tmp_hunyuan_loras/tmp_civit_1845780.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6f0d4db896553a651e3b01b09d1f3522c5458dbe
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_civit_1845780.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d04a1d4533afe454db50f7ef1ef4afdf5fbacf6e7626f62a44b57aff001d70a7
+size 322519480
diff --git a/models/tmp_hunyuan_loras/tmp_comfyonline_0207d854-a0d9-4858-9ac8-e2ab7adbf5b8.safetensors b/models/tmp_hunyuan_loras/tmp_comfyonline_0207d854-a0d9-4858-9ac8-e2ab7adbf5b8.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b98df550dd8a7ab4eabf20ab12d70eb44e0f8822
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_comfyonline_0207d854-a0d9-4858-9ac8-e2ab7adbf5b8.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d86a328e38086c329d61103ea82afdca3482a02714131337feabb16f322c76e
+size 322519480
diff --git a/models/tmp_hunyuan_loras/tmp_comfyonline_999c46e3-d08c-4084-bf26-8c1a9ceb451d.safetensors b/models/tmp_hunyuan_loras/tmp_comfyonline_999c46e3-d08c-4084-bf26-8c1a9ceb451d.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ba5d7daab54f3643e519126530babc8ccb08d93a
--- /dev/null
+++ b/models/tmp_hunyuan_loras/tmp_comfyonline_999c46e3-d08c-4084-bf26-8c1a9ceb451d.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21a6caa226c9f0888a61c01008213df129d2150b31326ba09b11c1e8d55e7cf7
+size 322519480
diff --git a/models/tmp_wanvideo_loras/tmp_civit_1594927.safetensors b/models/tmp_wanvideo_loras/tmp_civit_1594927.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f25fe54307ff9697fd09a2053b46f483538df5aa
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_civit_1594927.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb374d17d46777b3910164b7524a819ef41dd80d15f2d728181f21ea11e45f8
+size 306807976
diff --git a/models/tmp_wanvideo_loras/tmp_civit_1621698.safetensors b/models/tmp_wanvideo_loras/tmp_civit_1621698.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..edac2aa42a776b1aef48b317a46a71ba63387e20
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_civit_1621698.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:379c267ca9e6021bde9763c74a76117335181b189edbcd06e29acf1d16f6af10
+size 306807976
diff --git a/models/tmp_wanvideo_loras/tmp_civit_1623136.safetensors b/models/tmp_wanvideo_loras/tmp_civit_1623136.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..095cf442adc3385cb2150e8404442f27bed1cf5a
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_civit_1623136.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33f12708a12757c65bfd90a4471e52236c2420054957b8edc4516ad407f4899b
+size 359257680
diff --git a/models/tmp_wanvideo_loras/tmp_civit_1623701.safetensors b/models/tmp_wanvideo_loras/tmp_civit_1623701.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a545c43b3277cf95b3281d3abb34f82f80eeaf5d
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_civit_1623701.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:029195bc1220a672765a80901cc1c3f9e1a0aa08ef49f4d5fd38d832c8701fd5
+size 359257680
diff --git a/models/tmp_wanvideo_loras/tmp_civit_1663271.safetensors b/models/tmp_wanvideo_loras/tmp_civit_1663271.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f5d2d77ac77c679ba7e1232356a99355ebaa0194
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_civit_1663271.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:031a64c48c180f663869ab4565465cecf095cba0f66d58c806cba06ad3bc1e6f
+size 359257680
diff --git a/models/tmp_wanvideo_loras/tmp_civit_1694292.safetensors b/models/tmp_wanvideo_loras/tmp_civit_1694292.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b5f8b6095e647054ea949cd70cfc5967895b070b
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_civit_1694292.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62b08da970d427899ca322ccf20c82092aa7d92a41b67be13161a2b05f064f69
+size 359257680
diff --git a/models/tmp_wanvideo_loras/tmp_civit_1803667.safetensors b/models/tmp_wanvideo_loras/tmp_civit_1803667.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..649d58e668927dea19267aa019c9cec0ab263113
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_civit_1803667.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18be3802a4533456070054ee7622f1046eaba4f0d93888dfb6e1ddcd3e8e7cb0
+size 306807976
diff --git a/models/tmp_wanvideo_loras/tmp_comfyonline_961c2387-bd6b-4841-a757-b07085474e9c.safetensors b/models/tmp_wanvideo_loras/tmp_comfyonline_961c2387-bd6b-4841-a757-b07085474e9c.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..74539a68de473cb7d18d43c17e4af01dcabb0c33
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_comfyonline_961c2387-bd6b-4841-a757-b07085474e9c.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd45e73e7ffd23b05b26fda6e6fcde53984c79ab16c0cf29405642a0fe458941
+size 306807976
diff --git a/models/tmp_wanvideo_loras/tmp_hf_Kijai_WanVideo_comfy_Wan21_CausVid_14B_T2V_lora_rank32.safetensors.safetensors b/models/tmp_wanvideo_loras/tmp_hf_Kijai_WanVideo_comfy_Wan21_CausVid_14B_T2V_lora_rank32.safetensors.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d6742199f1061c24f44ff43dabebfbf278a383c5
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_hf_Kijai_WanVideo_comfy_Wan21_CausVid_14B_T2V_lora_rank32.safetensors.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5664f709f39b8352e8487e412efdac5c2fc283f21d06e289a19344daaaa198d
+size 319116504
diff --git a/models/tmp_wanvideo_loras/tmp_hf_Kijai_WanVideo_comfy_Wan2_1-T2V-14B_CausVid_fp8_e4m3fn.safetensors.safetensors b/models/tmp_wanvideo_loras/tmp_hf_Kijai_WanVideo_comfy_Wan2_1-T2V-14B_CausVid_fp8_e4m3fn.safetensors.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b77b8804af8fb82831f47f67cdc7211c43aa3f95
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_hf_Kijai_WanVideo_comfy_Wan2_1-T2V-14B_CausVid_fp8_e4m3fn.safetensors.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5803b5b33e6f230f16e6f52c5380f1bc5a47a60a58c85e37376d76dfab1599db
+size 10250248192
diff --git a/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_B1gBnqK-wan.safetensors.safetensors b/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_B1gBnqK-wan.safetensors.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ecdb34035d1730348e46b44aa516e8bab734fad4
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_B1gBnqK-wan.safetensors.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a93c7ff6233b11bd818d9c2b3b9db1ae5145a0f51c8a11a06c2b2618a05709e
+size 306807976
diff --git a/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_bugsbunny.safetensors.safetensors b/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_bugsbunny.safetensors.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f80473203513abd26dde086446298475a20ebdd4
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_bugsbunny.safetensors.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31f7372189fbdcb288a222441702ccf4616b064d41bff4f717d7524cd408fc03
+size 306807976
diff --git a/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_twerking-lora.safetensors.safetensors b/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_twerking-lora.safetensors.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b5304b9307c7eab6ad2b23cd21327d1dc44b20ae
--- /dev/null
+++ b/models/tmp_wanvideo_loras/tmp_hf_ybxh_3main_twerking-lora.safetensors.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9943aac075d1c07d7a3ab388aabdb031d5f0e6bb6080ff59299f0dfe98b88d3
+size 306807976
diff --git a/models/transformers/facebook/wav2vec2-base-960h/.gitattributes b/models/transformers/facebook/wav2vec2-base-960h/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..cf6d51fc9b1a671c35e92d6bd009880937aaa12d
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/.gitattributes
@@ -0,0 +1,18 @@
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
diff --git a/models/transformers/facebook/wav2vec2-base-960h/README.md b/models/transformers/facebook/wav2vec2-base-960h/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7fe2047d7ac9b9816848c657b2a492ee95b264b
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/README.md
@@ -0,0 +1,128 @@
+---
+language: en
+datasets:
+- librispeech_asr
+tags:
+- audio
+- automatic-speech-recognition
+- hf-asr-leaderboard
+license: apache-2.0
+widget:
+- example_title: Librispeech sample 1
+ src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
+- example_title: Librispeech sample 2
+ src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
+model-index:
+- name: wav2vec2-base-960h
+ results:
+ - task:
+ name: Automatic Speech Recognition
+ type: automatic-speech-recognition
+ dataset:
+ name: LibriSpeech (clean)
+ type: librispeech_asr
+ config: clean
+ split: test
+ args:
+ language: en
+ metrics:
+ - name: Test WER
+ type: wer
+ value: 3.4
+ - task:
+ name: Automatic Speech Recognition
+ type: automatic-speech-recognition
+ dataset:
+ name: LibriSpeech (other)
+ type: librispeech_asr
+ config: other
+ split: test
+ args:
+ language: en
+ metrics:
+ - name: Test WER
+ type: wer
+ value: 8.6
+---
+
+# Wav2Vec2-Base-960h
+
+[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)
+
+The base model pretrained and fine-tuned on 960 hours of Librispeech on 16kHz sampled speech audio. When using the model
+make sure that your speech input is also sampled at 16Khz.
+
+[Paper](https://arxiv.org/abs/2006.11477)
+
+Authors: Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli
+
+**Abstract**
+
+We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.
+
+The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.
+
+
+# Usage
+
+To transcribe audio files the model can be used as a standalone acoustic model as follows:
+
+```python
+ from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
+ from datasets import load_dataset
+ import torch
+
+ # load model and tokenizer
+ processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+ model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
+
+ # load dummy dataset and read soundfiles
+ ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
+
+ # tokenize
+ input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values # Batch size 1
+
+ # retrieve logits
+ logits = model(input_values).logits
+
+ # take argmax and decode
+ predicted_ids = torch.argmax(logits, dim=-1)
+ transcription = processor.batch_decode(predicted_ids)
+ ```
+
+ ## Evaluation
+
+ This code snippet shows how to evaluate **facebook/wav2vec2-base-960h** on LibriSpeech's "clean" and "other" test data.
+
+```python
+from datasets import load_dataset
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+import torch
+from jiwer import wer
+
+
+librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
+
+model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+
+def map_to_pred(batch):
+ input_values = processor(batch["audio"]["array"], return_tensors="pt", padding="longest").input_values
+ with torch.no_grad():
+ logits = model(input_values.to("cuda")).logits
+
+ predicted_ids = torch.argmax(logits, dim=-1)
+ transcription = processor.batch_decode(predicted_ids)
+ batch["transcription"] = transcription
+ return batch
+
+result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["audio"])
+
+print("WER:", wer(result["text"], result["transcription"]))
+```
+
+*Result (WER)*:
+
+| "clean" | "other" |
+|---|---|
+| 3.4 | 8.6 |
\ No newline at end of file
diff --git a/models/transformers/facebook/wav2vec2-base-960h/config.json b/models/transformers/facebook/wav2vec2-base-960h/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8ca9cc7496e145e37d09cec17d0c3bf9b8523c8e
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/config.json
@@ -0,0 +1,77 @@
+{
+ "_name_or_path": "facebook/wav2vec2-base-960h",
+ "activation_dropout": 0.1,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 1,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "sum",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.1,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.1,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.1,
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.1,
+ "mask_feature_length": 10,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_prob": 0.05,
+ "model_type": "wav2vec2",
+ "num_attention_heads": 12,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "pad_token_id": 0,
+ "proj_codevector_dim": 256,
+ "transformers_version": "4.7.0.dev0",
+ "vocab_size": 32
+}
diff --git a/models/transformers/facebook/wav2vec2-base-960h/feature_extractor_config.json b/models/transformers/facebook/wav2vec2-base-960h/feature_extractor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..52fdd74dc06f40033506e402269fbde5e7adc21d
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/feature_extractor_config.json
@@ -0,0 +1,8 @@
+{
+ "do_normalize": true,
+ "feature_dim": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+}
diff --git a/models/transformers/facebook/wav2vec2-base-960h/model.safetensors b/models/transformers/facebook/wav2vec2-base-960h/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..942562678fb28df86c055027c18216fa2a7cb5dd
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aa76ab2243c81747a1f832954586bc566090c83a0ac167df6f31f0fa917d74a
+size 377607901
diff --git a/models/transformers/facebook/wav2vec2-base-960h/preprocessor_config.json b/models/transformers/facebook/wav2vec2-base-960h/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3f24dc078fcba55ee1d417a413847ead40c093a3
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/preprocessor_config.json
@@ -0,0 +1,8 @@
+{
+ "do_normalize": true,
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+}
diff --git a/models/transformers/facebook/wav2vec2-base-960h/special_tokens_map.json b/models/transformers/facebook/wav2vec2-base-960h/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..25bc39604f72700b3b8e10bd69bb2f227157edd1
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/special_tokens_map.json
@@ -0,0 +1 @@
+{"bos_token": "", "eos_token": "", "unk_token": "", "pad_token": ""}
\ No newline at end of file
diff --git a/models/transformers/facebook/wav2vec2-base-960h/tokenizer_config.json b/models/transformers/facebook/wav2vec2-base-960h/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..978a15a96dbb2d23e2afbc70137cae6c5ce38c8d
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/tokenizer_config.json
@@ -0,0 +1 @@
+{"unk_token": "", "bos_token": "", "eos_token": "", "pad_token": "", "do_lower_case": false, "return_attention_mask": false, "do_normalize": true}
\ No newline at end of file
diff --git a/models/transformers/facebook/wav2vec2-base-960h/vocab.json b/models/transformers/facebook/wav2vec2-base-960h/vocab.json
new file mode 100644
index 0000000000000000000000000000000000000000..88181b954aa14df68be9b444b3c36585f3078c0a
--- /dev/null
+++ b/models/transformers/facebook/wav2vec2-base-960h/vocab.json
@@ -0,0 +1 @@
+{"": 0, "": 1, "": 2, "": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}
\ No newline at end of file
diff --git a/models/vae/bopbt/VAE_A_quality/latest_net_G.pth b/models/vae/bopbt/VAE_A_quality/latest_net_G.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1173e8d2510cc046bd1fa3a5997a5349ea1107db
--- /dev/null
+++ b/models/vae/bopbt/VAE_A_quality/latest_net_G.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de39b6d0081746995afd2393a94a11a10ae052fd9484b4fb06b7fa6bd28dcf5d
+size 3498887
diff --git a/models/vae/bopbt/VAE_B_quality/latest_net_G.pth b/models/vae/bopbt/VAE_B_quality/latest_net_G.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5eb76d34159f208e4a2791b6f0e2631232a6e67d
--- /dev/null
+++ b/models/vae/bopbt/VAE_B_quality/latest_net_G.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e40d6ea7acf27f1420abd29197086c21d0c2a7b8fe98a9e09c60a339457a830
+size 3498887
diff --git a/models/vae/bopbt/VAE_B_scratch/latest_net_G.pth b/models/vae/bopbt/VAE_B_scratch/latest_net_G.pth
new file mode 100644
index 0000000000000000000000000000000000000000..790534655029db70f8f091040eca687ba8305e4d
--- /dev/null
+++ b/models/vae/bopbt/VAE_B_scratch/latest_net_G.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e22bd7274229beaabf27427646d4623c7b5a302829dd9d466514ff639ba40f6a
+size 3498887