Image-to-Video
Diffusers
Safetensors
English
LTX2Pipeline
text-to-video
ltx-2
ltx-2-3
ltx-video
lightricks
Instructions to use CalamitousFelicitousness/LTX-2.3-dev-Diffusers with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Diffusers
How to use CalamitousFelicitousness/LTX-2.3-dev-Diffusers with Diffusers:
pip install -U diffusers transformers accelerate
```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image, export_to_video

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained(
    "CalamitousFelicitousness/LTX-2.3-dev-Diffusers",
    dtype=torch.bfloat16,
    device_map="cuda",
)
pipe.to("cuda")

prompt = "A man with short gray hair plays a red electric guitar."
image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
)

output = pipe(image=image, prompt=prompt).frames[0]
export_to_video(output, "output.mp4")
```
- Notebooks
- Google Colab
- Kaggle
Upload folder using huggingface_hub
Browse files
- vae/config.json +18 -6
- vae/diffusion_pytorch_model.safetensors +2 -2
vae/config.json
CHANGED
|
@@ -5,11 +5,12 @@
|
|
| 5 |
256,
|
| 6 |
512,
|
| 7 |
1024,
|
| 8 |
-
|
| 9 |
],
|
| 10 |
"decoder_block_out_channels": [
|
| 11 |
256,
|
| 12 |
512,
|
|
|
|
| 13 |
1024
|
| 14 |
],
|
| 15 |
"decoder_causal": false,
|
|
@@ -17,16 +18,19 @@
|
|
| 17 |
false,
|
| 18 |
false,
|
| 19 |
false,
|
|
|
|
| 20 |
false
|
| 21 |
],
|
| 22 |
"decoder_layers_per_block": [
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
|
|
|
| 27 |
],
|
| 28 |
"decoder_spatial_padding_mode": "reflect",
|
| 29 |
"decoder_spatio_temporal_scaling": [
|
|
|
|
| 30 |
true,
|
| 31 |
true,
|
| 32 |
true
|
|
@@ -50,7 +54,7 @@
|
|
| 50 |
"layers_per_block": [
|
| 51 |
4,
|
| 52 |
6,
|
| 53 |
-
|
| 54 |
2,
|
| 55 |
2
|
| 56 |
],
|
|
@@ -71,11 +75,19 @@
|
|
| 71 |
"upsample_factor": [
|
| 72 |
2,
|
| 73 |
2,
|
|
|
|
| 74 |
2
|
| 75 |
],
|
| 76 |
"upsample_residual": [
|
|
|
|
| 77 |
true,
|
| 78 |
true,
|
| 79 |
true
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
]
|
| 81 |
}
|
|
|
|
| 5 |
256,
|
| 6 |
512,
|
| 7 |
1024,
|
| 8 |
+
1024
|
| 9 |
],
|
| 10 |
"decoder_block_out_channels": [
|
| 11 |
256,
|
| 12 |
512,
|
| 13 |
+
512,
|
| 14 |
1024
|
| 15 |
],
|
| 16 |
"decoder_causal": false,
|
|
|
|
| 18 |
false,
|
| 19 |
false,
|
| 20 |
false,
|
| 21 |
+
false,
|
| 22 |
false
|
| 23 |
],
|
| 24 |
"decoder_layers_per_block": [
|
| 25 |
+
4,
|
| 26 |
+
6,
|
| 27 |
+
4,
|
| 28 |
+
2,
|
| 29 |
+
2
|
| 30 |
],
|
| 31 |
"decoder_spatial_padding_mode": "reflect",
|
| 32 |
"decoder_spatio_temporal_scaling": [
|
| 33 |
+
true,
|
| 34 |
true,
|
| 35 |
true,
|
| 36 |
true
|
|
|
|
| 54 |
"layers_per_block": [
|
| 55 |
4,
|
| 56 |
6,
|
| 57 |
+
4,
|
| 58 |
2,
|
| 59 |
2
|
| 60 |
],
|
|
|
|
| 75 |
"upsample_factor": [
|
| 76 |
2,
|
| 77 |
2,
|
| 78 |
+
1,
|
| 79 |
2
|
| 80 |
],
|
| 81 |
"upsample_residual": [
|
| 82 |
+
true,
|
| 83 |
true,
|
| 84 |
true,
|
| 85 |
true
|
| 86 |
+
],
|
| 87 |
+
"upsample_type": [
|
| 88 |
+
"spatial",
|
| 89 |
+
"temporal",
|
| 90 |
+
"spatiotemporal",
|
| 91 |
+
"spatiotemporal"
|
| 92 |
]
|
| 93 |
}
|
vae/diffusion_pytorch_model.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:98404b1826a38b4ffc4b47ec5c4db71dce889eb84ab52fafd21b5116afe76636
|
| 3 |
+
size 1452233162
|