Buckets:
| import{s as zs,o as Ss,n as Bt}from"../chunks/scheduler.8c3d61f6.js";import{S as Ps,i as As,g as l,s,r as h,A as Ds,h as i,f as n,c as o,j as w,x as T,u as g,k as j,l as qs,y as t,a as m,v as f,d as M,t as y,w as _}from"../chunks/index.da70eac4.js";import{T as Os}from"../chunks/Tip.1d9b8c37.js";import{D as I}from"../chunks/Docstring.c021b19a.js";import{C as z}from"../chunks/CodeBlock.a9c4becf.js";import{E as ps}from"../chunks/ExampleCodeBlock.56b4589c.js";import{H as De,E as Ks}from"../chunks/getInferenceSnippets.725ed3d4.js";import{H as eo,a as $s}from"../chunks/HfOption.6c3b4e77.js";function to(G){let r,J="Click on the LTX-Video models in the right sidebar for more examples of other video generation tasks.";return{c(){r=l("p"),r.textContent=J},l(u){r=i(u,"P",{"data-svelte-h":!0}),T(r)!=="svelte-1gv8zxf"&&(r.textContent=J)},m(u,c){m(u,r,c)},p:Bt,d(u){u&&n(r)}}}function no(G){let r,J='Refer to the <a href="../../optimization/memory">Reduce memory usage</a> guide for more details about the various memory saving techniques.',u,c,p="The LTX-Video model below requires ~10GB of VRAM.",a,b,X;return b=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTFRYUGlwZWxpbmUlMkMlMjBBdXRvTW9kZWwlMEFmcm9tJTIwZGlmZnVzZXJzLmhvb2tzJTIwaW1wb3J0JTIwYXBwbHlfZ3JvdXBfb2ZmbG9hZGluZyUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBleHBvcnRfdG9fdmlkZW8lMEElMEElMjMlMjBmcDglMjBsYXllcndpc2UlMjB3ZWlnaHQtY2FzdGluZyUwQXRyYW5zZm9ybWVyJTIwJTNEJTIwQXV0b01vZGVsLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJMaWdodHJpY2tzJTJGTFRYLVZpZGVvJTIyJTJDJTBBJTIwJTIwJTIwJTIwc3ViZm9sZGVyJTNEJTIydHJhbnNmb3JtZXIlMjIlMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTBBKSUwQXRyYW5zZm9ybWVyLmVuYWJsZV9sYXllcndpc2VfY2FzdGluZyglMEElMjAlMjAlMjAlMjBzdG9yYWdlX2R0eXBlJTNEdG9yY2guZmxvYXQ4X2U0bTNmbiUyQyUyMGNvbXB1dGVfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiUwQSklMEElMEFwaXBlbGluZSUyMCUzRCUyMExUWFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJMaWdodHJpY2tzJTJGTFRYLVZpZGVvJTIyJTJDJTIwdHJhbnNmb3JtZXIlM0R0cmFuc2Zvcm1lciUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBJTBBJTIzJTIwZ3JvdXAtb2ZmbG9hZGluZyUwQW9ubG9hZF9kZXZpY2UlMjAlM0QlMjB0b3JjaC5kZXZpY2UoJTIyY3VkYSUyMiklMEFvZmZsb2FkX2RldmljZSUyMCUzRCUyMHRvcmNoLmRldmljZSglMjJjcHUlMjIpJTBBcGlwZWxpbmUudHJhbnNmb3JtZXIuZW5hYmxlX2dyb3VwX29mZmxvYWQob25sb2FkX2RldmljZSUzRG9ubG9hZF9kZXZpY2UlMkMlMjBvZmZsb2FkX2RldmljZSUzRG9mZmxvYWRfZGV2aWNlJTJDJTIwb2ZmbG9hZF90eXBlJTNEJTIybGVhZl9sZXZlbCUyMiUyQyUyMHVzZV9zdHJlYW0lM0RUcnVlKSUwQWFwcGx5X2dyb3VwX29mZmxvYWRpbmcocGlwZWxpbmUudGV4dF9lbmNvZGVyJTJDJTIwb25sb2FkX2RldmljZSUzRG9ubG9hZF9kZXZpY2UlMkMlMjBvZmZsb2FkX3R5cGUlM0QlMjJibG9ja19sZXZlbCUyMiUyQyUyMG51bV9ibG9ja3NfcGVyX2dyb3VwJTNEMiklMEFhcHBseV9ncm91cF9vZmZsb2FkaW5nKHBpcGVsaW5lLnZhZSUyQyUyMG9ubG9hZF9kZXZpY2UlM0RvbmxvYWRfZGV2aWNlJTJDJTIwb2ZmbG9hZF90eXBlJTNEJTIybGVhZl9sZXZlbCUyMiklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjIlMjIlMjIlMEFBJTIwd29tYW4lMjB3aXRoJTIwbG9uZyUyMGJyb3duJTIwaGFpciUyMGFuZCUyMGxpZ2h0JTIwc2tpbiUyMHNtaWxlcyUyMGF0JTIwYW5vdGhlciUyMHdvbWFuJTIwd2l0aCUyMGxvbmclMjBibG9uZGUlMjBoYWlyLiUwQVRoZSUyMHdvbWFuJTIwd2l0aCUyMGJyb3duJTIwaGFpciUyMHdlYXJzJTIwYSUyMGJsYWNrJTIwamFja2V0JTIwYW5kJTIwaGFzJTIwYSUyMHNtYWxsJTJDJTIwYmFyZWx5JTIwbm90aWNlYWJsZSUyMG1vbGUlMjBvbiUyMGhlciUyMHJpZ2h0JTIwY2hlZWsuJTBBVGhlJTIwY2FtZXJhJTIwYW5nbGUlMjBpcyUyMGElMjBjbG9zZS11cCUyQyUyMGZvY3VzZWQlMjBvbiUyMHRoZSUyMHdvbWFuJTIwd2l0aCUyMGJyb3duJTIwaGFpcidzJTIwZmFjZS4lMjBUaGUlMjBsaWdodGluZyUyMGlzJTIwd2FybSUyMGFuZCUyMCUwQW5hdHVyYWwlMkMlMjBsaWtlbHklMjBmcm9tJTIwdGhlJTIwc2V0dGluZyUyMHN1biUyQyUyMGNhc3RpbmclMjBhJTIwc29mdCUyMGdsb3clMjBvbiUyMHRoZSUyMHNjZW5lLiUyMFRoZSUyMHNjZW5lJTIwYXBwZWFycyUyMHRvJTIwYmUlMjByZWFsLWxpZmUlMjBmb290YWdlJTBBJTIyJTIyJTIyJTBBbmVnYXRpdmVfcHJvbXB0JTIwJTNEJTIwJTIyd29yc3QlMjBxdWFsaXR5JTJDJTIwaW5jb25zaXN0ZW50JTIwbW90aW9uJTJDJTIwYmx1cnJ5JTJDJTIwaml0dGVyeSUyQyUyMGRpc3RvcnRlZCUyMiUwQSUwQXZpZGVvJTIwJTNEJTIwcGlwZWxpbmUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0Q3NjglMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0Q1MTIlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEMTYxJTJDJTBBJTIwJTIwJTIwJTIwZGVjb2RlX3RpbWVzdGVwJTNEMC4wMyUyQyUwQSUyMCUyMCUyMCUyMGRlY29kZV9ub2lzZV9zY2FsZSUzRDAuMDI1JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDUwJTJDJTBBKS5mcmFtZXMlNUIwJTVEJTBBZXhwb3J0X3RvX3ZpZGVvKHZpZGVvJTJDJTIwJTIyb3V0cHV0Lm1wNCUyMiUyQyUyMGZwcyUzRDI0KQ==",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXPipeline, AutoModel | |
| <span class="hljs-keyword">from</span> diffusers.hooks <span class="hljs-keyword">import</span> apply_group_offloading | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video | |
| <span class="hljs-comment"># fp8 layerwise weight-casting</span> | |
| transformer = AutoModel.from_pretrained( | |
| <span class="hljs-string">"Lightricks/LTX-Video"</span>, | |
| subfolder=<span class="hljs-string">"transformer"</span>, | |
| torch_dtype=torch.bfloat16 | |
| ) | |
| transformer.enable_layerwise_casting( | |
| storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16 | |
| ) | |
| pipeline = LTXPipeline.from_pretrained(<span class="hljs-string">"Lightricks/LTX-Video"</span>, transformer=transformer, torch_dtype=torch.bfloat16) | |
| <span class="hljs-comment"># group-offloading</span> | |
| onload_device = torch.device(<span class="hljs-string">"cuda"</span>) | |
| offload_device = torch.device(<span class="hljs-string">"cpu"</span>) | |
| pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type=<span class="hljs-string">"leaf_level"</span>, use_stream=<span class="hljs-literal">True</span>) | |
| apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type=<span class="hljs-string">"block_level"</span>, num_blocks_per_group=<span class="hljs-number">2</span>) | |
| apply_group_offloading(pipeline.vae, onload_device=onload_device, offload_type=<span class="hljs-string">"leaf_level"</span>) | |
| prompt = <span class="hljs-string">""" | |
| A woman with long brown hair and light skin smiles at another woman with long blonde hair. | |
| The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. | |
| The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and | |
| natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage | |
| """</span> | |
| negative_prompt = <span class="hljs-string">"worst quality, inconsistent motion, blurry, jittery, distorted"</span> | |
| video = pipeline( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=<span class="hljs-number">768</span>, | |
| height=<span class="hljs-number">512</span>, | |
| num_frames=<span class="hljs-number">161</span>, | |
| decode_timestep=<span class="hljs-number">0.03</span>, | |
| decode_noise_scale=<span class="hljs-number">0.025</span>, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| ).frames[<span class="hljs-number">0</span>] | |
| export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">24</span>)`,wrap:!1}}),{c(){r=l("p"),r.innerHTML=J,u=s(),c=l("p"),c.textContent=p,a=s(),h(b.$$.fragment)},l(U){r=i(U,"P",{"data-svelte-h":!0}),T(r)!=="svelte-iowzkr"&&(r.innerHTML=J),u=o(U),c=i(U,"P",{"data-svelte-h":!0}),T(c)!=="svelte-7p0ppy"&&(c.textContent=p),a=o(U),g(b.$$.fragment,U)},m(U,C){m(U,r,C),m(U,u,C),m(U,c,C),m(U,a,C),f(b,U,C),X=!0},p:Bt,i(U){X||(M(b.$$.fragment,U),X=!0)},o(U){y(b.$$.fragment,U),X=!1},d(U){U&&(n(r),n(u),n(c),n(a)),_(b,U)}}}function so(G){let r,J='<a href="../../optimization/fp16#torchcompile">Compilation</a> is slow the first time but subsequent calls to the pipeline are faster.',u,c,p;return c=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTFRYUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX3ZpZGVvJTBBJTBBcGlwZWxpbmUlMjAlM0QlMjBMVFhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyTGlnaHRyaWNrcyUyRkxUWC1WaWRlbyUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpJTBBJTBBJTIzJTIwdG9yY2guY29tcGlsZSUwQXBpcGVsaW5lLnRyYW5zZm9ybWVyLnRvKG1lbW9yeV9mb3JtYXQlM0R0b3JjaC5jaGFubmVsc19sYXN0KSUwQXBpcGVsaW5lLnRyYW5zZm9ybWVyJTIwJTNEJTIwdG9yY2guY29tcGlsZSglMEElMjAlMjAlMjAlMjBwaXBlbGluZS50cmFuc2Zvcm1lciUyQyUyMG1vZGUlM0QlMjJtYXgtYXV0b3R1bmUlMjIlMkMlMjBmdWxsZ3JhcGglM0RUcnVlJTBBKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMiUyMiUyMiUwQUElMjB3b21hbiUyMHdpdGglMjBsb25nJTIwYnJvd24lMjBoYWlyJTIwYW5kJTIwbGlnaHQlMjBza2luJTIwc21pbGVzJTIwYXQlMjBhbm90aGVyJTIwd29tYW4lMjB3aXRoJTIwbG9uZyUyMGJsb25kZSUyMGhhaXIuJTBBVGhlJTIwd29tYW4lMjB3aXRoJTIwYnJvd24lMjBoYWlyJTIwd2VhcnMlMjBhJTIwYmxhY2slMjBqYWNrZXQlMjBhbmQlMjBoYXMlMjBhJTIwc21hbGwlMkMlMjBiYXJlbHklMjBub3RpY2VhYmxlJTIwbW9sZSUyMG9uJTIwaGVyJTIwcmlnaHQlMjBjaGVlay4lMEFUaGUlMjBjYW1lcmElMjBhbmdsZSUyMGlzJTIwYSUyMGNsb3NlLXVwJTJDJTIwZm9jdXNlZCUyMG9uJTIwdGhlJTIwd29tYW4lMjB3aXRoJTIwYnJvd24lMjBoYWlyJ3MlMjBmYWNlLiUyMFRoZSUyMGxpZ2h0aW5nJTIwaXMlMjB3YXJtJTIwYW5kJTIwJTBBbmF0dXJhbCUyQyUyMGxpa2VseSUyMGZyb20lMjB0aGUlMjBzZXR0aW5nJTIwc3VuJTJDJTIwY2FzdGluZyUyMGElMjBzb2Z0JTIwZ2xvdyUyMG9uJTIwdGhlJTIwc2NlbmUuJTIwVGhlJTIwc2NlbmUlMjBhcHBlYXJzJTIwdG8lMjBiZSUyMHJlYWwtbGlmZSUyMGZvb3RhZ2UlMEElMjIlMjIlMjIlMEFuZWdhdGl2ZV9wcm9tcHQlMjAlM0QlMjAlMjJ3b3JzdCUyMHF1YWxpdHklMkMlMjBpbmNvbnNpc3RlbnQlMjBtb3Rpb24lMkMlMjBibHVycnklMkMlMjBqaXR0ZXJ5JTJDJTIwZGlzdG9ydGVkJTIyJTBBJTBBdmlkZW8lMjAlM0QlMjBwaXBlbGluZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDc2OCUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDUxMiUyQyUwQSUyMCUyMCUyMCUyMG51bV9mcmFtZXMlM0QxNjElMkMlMEElMjAlMjAlMjAlMjBkZWNvZGVfdGltZXN0ZXAlM0QwLjAzJTJDJTBBJTIwJTIwJTIwJTIwZGVjb2RlX25vaXNlX3NjYWxlJTNEMC4wMjUlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMEEpLmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fdmlkZW8odmlkZW8lMkMlMjAlMjJvdXRwdXQubXA0JTIyJTJDJTIwZnBzJTNEMjQp",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video | |
| pipeline = LTXPipeline.from_pretrained( | |
| <span class="hljs-string">"Lightricks/LTX-Video"</span>, torch_dtype=torch.bfloat16 | |
| ) | |
| <span class="hljs-comment"># torch.compile</span> | |
| pipeline.transformer.to(memory_format=torch.channels_last) | |
| pipeline.transformer = torch.<span class="hljs-built_in">compile</span>( | |
| pipeline.transformer, mode=<span class="hljs-string">"max-autotune"</span>, fullgraph=<span class="hljs-literal">True</span> | |
| ) | |
| prompt = <span class="hljs-string">""" | |
| A woman with long brown hair and light skin smiles at another woman with long blonde hair. | |
| The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. | |
| The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and | |
| natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage | |
| """</span> | |
| negative_prompt = <span class="hljs-string">"worst quality, inconsistent motion, blurry, jittery, distorted"</span> | |
| video = pipeline( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=<span class="hljs-number">768</span>, | |
| height=<span class="hljs-number">512</span>, | |
| num_frames=<span class="hljs-number">161</span>, | |
| decode_timestep=<span class="hljs-number">0.03</span>, | |
| decode_noise_scale=<span class="hljs-number">0.025</span>, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| ).frames[<span class="hljs-number">0</span>] | |
| export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">24</span>)`,wrap:!1}}),{c(){r=l("p"),r.innerHTML=J,u=s(),h(c.$$.fragment)},l(a){r=i(a,"P",{"data-svelte-h":!0}),T(r)!=="svelte-dcc01q"&&(r.innerHTML=J),u=o(a),g(c.$$.fragment,a)},m(a,b){m(a,r,b),m(a,u,b),f(c,a,b),p=!0},p:Bt,i(a){p||(M(c.$$.fragment,a),p=!0)},o(a){y(c.$$.fragment,a),p=!1},d(a){a&&(n(r),n(u)),_(c,a)}}}function oo(G){let r,J,u,c;return r=new $s({props:{id:"usage",option:"memory",$$slots:{default:[no]},$$scope:{ctx:G}}}),u=new $s({props:{id:"usage",option:"inference speed",$$slots:{default:[so]},$$scope:{ctx:G}}}),{c(){h(r.$$.fragment),J=s(),h(u.$$.fragment)},l(p){g(r.$$.fragment,p),J=o(p),g(u.$$.fragment,p)},m(p,a){f(r,p,a),m(p,J,a),f(u,p,a),c=!0},p(p,a){const b={};a&2&&(b.$$scope={dirty:a,ctx:p}),r.$set(b);const X={};a&2&&(X.$$scope={dirty:a,ctx:p}),u.$set(X)},i(p){c||(M(r.$$.fragment,p),M(u.$$.fragment,p),c=!0)},o(p){y(r.$$.fragment,p),y(u.$$.fragment,p),c=!1},d(p){p&&n(J),_(r,p),_(u,p)}}}function ao(G){let r,J="Examples:",u,c,p;return c=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTFRYUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX3ZpZGVvJTBBJTBBcGlwZSUyMCUzRCUyMExUWFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJMaWdodHJpY2tzJTJGTFRYLVZpZGVvJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMHdvbWFuJTIwd2l0aCUyMGxvbmclMjBicm93biUyMGhhaXIlMjBhbmQlMjBsaWdodCUyMHNraW4lMjBzbWlsZXMlMjBhdCUyMGFub3RoZXIlMjB3b21hbiUyMHdpdGglMjBsb25nJTIwYmxvbmRlJTIwaGFpci4lMjBUaGUlMjB3b21hbiUyMHdpdGglMjBicm93biUyMGhhaXIlMjB3ZWFycyUyMGElMjBibGFjayUyMGphY2tldCUyMGFuZCUyMGhhcyUyMGElMjBzbWFsbCUyQyUyMGJhcmVseSUyMG5vdGljZWFibGUlMjBtb2xlJTIwb24lMjBoZXIlMjByaWdodCUyMGNoZWVrLiUyMFRoZSUyMGNhbWVyYSUyMGFuZ2xlJTIwaXMlMjBhJTIwY2xvc2UtdXAlMkMlMjBmb2N1c2VkJTIwb24lMjB0aGUlMjB3b21hbiUyMHdpdGglMjBicm93biUyMGhhaXIncyUyMGZhY2UuJTIwVGhlJTIwbGlnaHRpbmclMjBpcyUyMHdhcm0lMjBhbmQlMjBuYXR1cmFsJTJDJTIwbGlrZWx5JTIwZnJvbSUyMHRoZSUyMHNldHRpbmclMjBzdW4lMkMlMjBjYXN0aW5nJTIwYSUyMHNvZnQlMjBnbG93JTIwb24lMjB0aGUlMjBzY2VuZS4lMjBUaGUlMjBzY2VuZSUyMGFwcGVhcnMlMjB0byUyMGJlJTIwcmVhbC1saWZlJTIwZm9vdGFnZSUyMiUwQW5lZ2F0aXZlX3Byb21wdCUyMCUzRCUyMCUyMndvcnN0JTIwcXVhbGl0eSUyQyUyMGluY29uc2lzdGVudCUyMG1vdGlvbiUyQyUyMGJsdXJyeSUyQyUyMGppdHRlcnklMkMlMjBkaXN0b3J0ZWQlMjIlMEElMEF2aWRlbyUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0Q3MDQlMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0Q0ODAlMkMlMEElMjAlMjAlMjAlMjBudW1fZnJhbWVzJTNEMTYxJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDUwJTJDJTBBKS5mcmFtZXMlNUIwJTVEJTBBZXhwb3J0X3RvX3ZpZGVvKHZpZGVvJTJDJTIwJTIyb3V0cHV0Lm1wNCUyMiUyQyUyMGZwcyUzRDI0KQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video | |
| <span class="hljs-meta">>>> </span>pipe = LTXPipeline.from_pretrained(<span class="hljs-string">"Lightricks/LTX-Video"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage"</span> | |
| <span class="hljs-meta">>>> </span>negative_prompt = <span class="hljs-string">"worst quality, inconsistent motion, blurry, jittery, distorted"</span> | |
| <span class="hljs-meta">>>> </span>video = pipe( | |
| <span class="hljs-meta">... </span> prompt=prompt, | |
| <span class="hljs-meta">... </span> negative_prompt=negative_prompt, | |
| <span class="hljs-meta">... </span> width=<span class="hljs-number">704</span>, | |
| <span class="hljs-meta">... </span> height=<span class="hljs-number">480</span>, | |
| <span class="hljs-meta">... </span> num_frames=<span class="hljs-number">161</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">50</span>, | |
| <span class="hljs-meta">... </span>).frames[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">24</span>)`,wrap:!1}}),{c(){r=l("p"),r.textContent=J,u=s(),h(c.$$.fragment)},l(a){r=i(a,"P",{"data-svelte-h":!0}),T(r)!=="svelte-kvfsh7"&&(r.textContent=J),u=o(a),g(c.$$.fragment,a)},m(a,b){m(a,r,b),m(a,u,b),f(c,a,b),p=!0},p:Bt,i(a){p||(M(c.$$.fragment,a),p=!0)},o(a){y(c.$$.fragment,a),p=!1},d(a){a&&(n(r),n(u)),_(c,a)}}}function lo(G){let r,J="Examples:",u,c,p;return c=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTFRYSW1hZ2VUb1ZpZGVvUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX3ZpZGVvJTJDJTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBMVFhJbWFnZVRvVmlkZW9QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyTGlnaHRyaWNrcyUyRkxUWC1WaWRlbyUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQSUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZhLXItci1vLXclMkZ0aW55LW1lbWUtZGF0YXNldC1jYXB0aW9uZWQlMkZyZXNvbHZlJTJGbWFpbiUyRmltYWdlcyUyRjgucG5nJTIyJTBBKSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjB5b3VuZyUyMGdpcmwlMjBzdGFuZHMlMjBjYWxtbHklMjBpbiUyMHRoZSUyMGZvcmVncm91bmQlMkMlMjBsb29raW5nJTIwZGlyZWN0bHklMjBhdCUyMHRoZSUyMGNhbWVyYSUyQyUyMGFzJTIwYSUyMGhvdXNlJTIwZmlyZSUyMHJhZ2VzJTIwaW4lMjB0aGUlMjBiYWNrZ3JvdW5kLiUyMEZsYW1lcyUyMGVuZ3VsZiUyMHRoZSUyMHN0cnVjdHVyZSUyQyUyMHdpdGglMjBzbW9rZSUyMGJpbGxvd2luZyUyMGludG8lMjB0aGUlMjBhaXIuJTIwRmlyZWZpZ2h0ZXJzJTIwaW4lMjBwcm90ZWN0aXZlJTIwZ2VhciUyMHJ1c2glMjB0byUyMHRoZSUyMHNjZW5lJTJDJTIwYSUyMGZpcmUlMjB0cnVjayUyMGxhYmVsZWQlMjAnMzgnJTIwdmlzaWJsZSUyMGJlaGluZCUyMHRoZW0uJTIwVGhlJTIwZ2lybCdzJTIwbmV1dHJhbCUyMGV4cHJlc3Npb24lMjBjb250cmFzdHMlMjBzaGFycGx5JTIwd2l0aCUyMHRoZSUyMGNoYW9zJTIwb2YlMjB0aGUlMjBmaXJlJTJDJTIwY3JlYXRpbmclMjBhJTIwcG9pZ25hbnQlMjBhbmQlMjBlbW90aW9uYWxseSUyMGNoYXJnZWQlMjBzY2VuZS4lMjIlMEFuZWdhdGl2ZV9wcm9tcHQlMjAlM0QlMjAlMjJ3b3JzdCUyMHF1YWxpdHklMkMlMjBpbmNvbnNpc3RlbnQlMjBtb3Rpb24lMkMlMjBibHVycnklMkMlMjBqaXR0ZXJ5JTJDJTIwZGlzdG9ydGVkJTIyJTBBJTBBdmlkZW8lMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDcwNCUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDQ4MCUyQyUwQSUyMCUyMCUyMCUyMG51bV9mcmFtZXMlM0QxNjElMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMEEpLmZyYW1lcyU1QjAlNUQlMEFleHBvcnRfdG9fdmlkZW8odmlkZW8lMkMlMjAlMjJvdXRwdXQubXA0JTIyJTJDJTIwZnBzJTNEMjQp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXImageToVideoPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video, load_image | |
| <span class="hljs-meta">>>> </span>pipe = LTXImageToVideoPipeline.from_pretrained(<span class="hljs-string">"Lightricks/LTX-Video"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/a-r-r-o-w/tiny-meme-dataset-captioned/resolve/main/images/8.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A young girl stands calmly in the foreground, looking directly at the camera, as a house fire rages in the background. Flames engulf the structure, with smoke billowing into the air. Firefighters in protective gear rush to the scene, a fire truck labeled '38' visible behind them. The girl's neutral expression contrasts sharply with the chaos of the fire, creating a poignant and emotionally charged scene."</span> | |
| <span class="hljs-meta">>>> </span>negative_prompt = <span class="hljs-string">"worst quality, inconsistent motion, blurry, jittery, distorted"</span> | |
| <span class="hljs-meta">>>> </span>video = pipe( | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> prompt=prompt, | |
| <span class="hljs-meta">... </span> negative_prompt=negative_prompt, | |
| <span class="hljs-meta">... </span> width=<span class="hljs-number">704</span>, | |
| <span class="hljs-meta">... </span> height=<span class="hljs-number">480</span>, | |
| <span class="hljs-meta">... </span> num_frames=<span class="hljs-number">161</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">50</span>, | |
| <span class="hljs-meta">... </span>).frames[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">24</span>)`,wrap:!1}}),{c(){r=l("p"),r.textContent=J,u=s(),h(c.$$.fragment)},l(a){r=i(a,"P",{"data-svelte-h":!0}),T(r)!=="svelte-kvfsh7"&&(r.textContent=J),u=o(a),g(c.$$.fragment,a)},m(a,b){m(a,r,b),m(a,u,b),f(c,a,b),p=!0},p:Bt,i(a){p||(M(c.$$.fragment,a),p=!0)},o(a){y(c.$$.fragment,a),p=!1},d(a){a&&(n(r),n(u)),_(c,a)}}}function io(G){let r,J="Examples:",u,c,p;return c=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzLnBpcGVsaW5lcy5sdHgucGlwZWxpbmVfbHR4X2NvbmRpdGlvbiUyMGltcG9ydCUyMExUWENvbmRpdGlvblBpcGVsaW5lJTJDJTIwTFRYVmlkZW9Db25kaXRpb24lMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX3ZpZGVvJTJDJTIwbG9hZF92aWRlbyUyQyUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwTFRYQ29uZGl0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkxpZ2h0cmlja3MlMkZMVFgtVmlkZW8tMC45LjUlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEElMEElMjMlMjBMb2FkJTIwaW5wdXQlMjBpbWFnZSUyMGFuZCUyMHZpZGVvJTBBdmlkZW8lMjAlM0QlMjBsb2FkX3ZpZGVvKCUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmRpZmZ1c2VycyUyRmNvc21vcyUyRmNvc21vcy12aWRlbzJ3b3JsZC1pbnB1dC12aWQubXA0JTIyJTBBKSUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZjb3Ntb3MlMkZjb3Ntb3MtdmlkZW8yd29ybGQtaW5wdXQuanBnJTIyJTBBKSUwQSUwQSUyMyUyMENyZWF0ZSUyMGNvbmRpdGlvbmluZyUyMG9iamVjdHMlMEFjb25kaXRpb24xJTIwJTNEJTIwTFRYVmlkZW9Db25kaXRpb24oJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGZyYW1lX2luZGV4JTNEMCUyQyUwQSklMEFjb25kaXRpb24yJTIwJTNEJTIwTFRYVmlkZW9Db25kaXRpb24oJTBBJTIwJTIwJTIwJTIwdmlkZW8lM0R2aWRlbyUyQyUwQSUyMCUyMCUyMCUyMGZyYW1lX2luZGV4JTNEODAlMkMlMEEpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyVGhlJTIwdmlkZW8lMjBkZXBpY3RzJTIwYSUyMGxvbmclMkMlMjBzdHJhaWdodCUyMGhpZ2h3YXklMjBzdHJldGNoaW5nJTIwaW50byUyMHRoZSUyMGRpc3RhbmNlJTJDJTIwZmxhbmtlZCUyMGJ5JTIwbWV0YWwlMjBndWFyZHJhaWxzLiUyMFRoZSUyMHJvYWQlMjBpcyUyMGRpdmlkZWQlMjBpbnRvJTIwbXVsdGlwbGUlMjBsYW5lcyUyQyUyMHdpdGglMjBhJTIwZmV3JTIwdmVoaWNsZXMlMjB2aXNpYmxlJTIwaW4lMjB0aGUlMjBmYXIlMjBkaXN0YW5jZS4lMjBUaGUlMjBzdXJyb3VuZGluZyUyMGxhbmRzY2FwZSUyMGZlYXR1cmVzJTIwZHJ5JTJDJTIwZ3Jhc3N5JTIwZmllbGRzJTIwb24lMjBvbmUlMjBzaWRlJTIwYW5kJTIwcm9sbGluZyUyMGhpbGxzJTIwb24lMjB0aGUlMjBvdGhlci4lMjBUaGUlMjBza3klMjBpcyUyMG1vc3RseSUyMGNsZWFyJTIwd2l0aCUyMGElMjBmZXclMjBzY2F0dGVyZWQlMjBjbG91ZHMlMkMlMjBzdWdnZXN0aW5nJTIwYSUyMGJyaWdodCUyQyUyMHN1bm55JTIwZGF5LiUyMEFuZCUyMHRoZW4lMjB0aGUlMjBjYW1lcmElMjBzd2l0Y2glMjB0byUyMGElMjB3aW5kaW5nJTIwbW91bnRhaW4lMjByb2FkJTIwY292ZXJlZCUyMGluJTIwc25vdyUyQyUyMHdpdGglMjBhJTIwc2luZ2xlJTIwdmVoaWNsZSUyMHRyYXZlbGluZyUyMGFsb25nJTIwaXQuJTIwVGhlJTIwcm9hZCUyMGlzJTIwZmxhbmtlZCUyMGJ5JTIwc3RlZXAlMkMlMjByb2NreSUyMGNsaWZmcyUyMGFuZCUyMHNwYXJzZSUyMHZlZ2V0YXRpb24uJTIwVGhlJTIwbGFuZHNjYXBlJTIwaXMlMjBjaGFyYWN0ZXJpemVkJTIwYnklMjBydWdnZWQlMjB0ZXJyYWluJTIwYW5kJTIwYSUyMHJpdmVyJTIwdmlzaWJsZSUyMGluJTIwdGhlJTIwZGlzdGFuY2UuJTIwVGhlJTIwc2NlbmUlMjBjYXB0dXJlcyUyMHRoZSUyMHNvbGl0dWRlJTIwYW5kJTIwYmVhdXR5JTIwb2YlMjBhJTIwd2ludGVyJTIwZHJpdmUlMjB0aHJvdWdoJTIwYSUyMG1vdW50YWlub3VzJTIwcmVnaW9uLiUyMiUwQW5lZ2F0aXZlX3Byb21wdCUyMCUzRCUyMCUyMndvcnN0JTIwcXVhbGl0eSUyQyUyMGluY29uc2lzdGVudCUyMG1vdGlvbiUyQyUyMGJsdXJyeSUyQyUyMGppdHRlcnklMkMlMjBkaXN0b3J0ZWQlMjIlMEElMEElMjMlMjBHZW5lcmF0ZSUyMHZpZGVvJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2guR2VuZXJhdG9yKCUyMmN1ZGElMjIpLm1hbnVhbF9zZWVkKDApJTBBJTIzJTIwVGV4dC1vbmx5JTIwY29uZGl0aW9uaW5nJTIwaXMlMjBhbHNvJTIwc3VwcG9ydGVkJTIwd2l0aG91dCUyMHRoZSUyMG5lZWQlMjB0byUyMHBhc3MlMjAlNjBjb25kaXRpb25zJTYwJTBBdmlkZW8lMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMGNvbmRpdGlvbnMlM0QlNUJjb25kaXRpb24xJTJDJTIwY29uZGl0aW9uMiU1RCUyQyUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdCUzRG5lZ2F0aXZlX3Byb21wdCUyQyUwQSUyMCUyMCUyMCUyMHdpZHRoJTNENzY4JTJDJTBBJTIwJTIwJTIwJTIwaGVpZ2h0JTNENTEyJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2ZyYW1lcyUzRDE2MSUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q0MCUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRGdlbmVyYXRvciUyQyUwQSkuZnJhbWVzJTVCMCU1RCUwQSUwQWV4cG9ydF90b192aWRlbyh2aWRlbyUyQyUyMCUyMm91dHB1dC5tcDQlMjIlMkMlMjBmcHMlM0QyNCk=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.pipelines.ltx.pipeline_ltx_condition <span class="hljs-keyword">import</span> LTXConditionPipeline, LTXVideoCondition | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video, load_video, load_image | |
| <span class="hljs-meta">>>> </span>pipe = LTXConditionPipeline.from_pretrained(<span class="hljs-string">"Lightricks/LTX-Video-0.9.5"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Load input image and video</span> | |
| <span class="hljs-meta">>>> </span>video = load_video( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input-vid.mp4"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input.jpg"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Create conditioning objects</span> | |
| <span class="hljs-meta">>>> </span>condition1 = LTXVideoCondition( | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> frame_index=<span class="hljs-number">0</span>, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>condition2 = LTXVideoCondition( | |
| <span class="hljs-meta">... </span> video=video, | |
| <span class="hljs-meta">... </span> frame_index=<span class="hljs-number">80</span>, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"The video depicts a long, straight highway stretching into the distance, flanked by metal guardrails. The road is divided into multiple lanes, with a few vehicles visible in the far distance. The surrounding landscape features dry, grassy fields on one side and rolling hills on the other. The sky is mostly clear with a few scattered clouds, suggesting a bright, sunny day. And then the camera switch to a winding mountain road covered in snow, with a single vehicle traveling along it. The road is flanked by steep, rocky cliffs and sparse vegetation. The landscape is characterized by rugged terrain and a river visible in the distance. The scene captures the solitude and beauty of a winter drive through a mountainous region."</span> | |
| <span class="hljs-meta">>>> </span>negative_prompt = <span class="hljs-string">"worst quality, inconsistent motion, blurry, jittery, distorted"</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Generate video</span> | |
| <span class="hljs-meta">>>> </span>generator = torch.Generator(<span class="hljs-string">"cuda"</span>).manual_seed(<span class="hljs-number">0</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Text-only conditioning is also supported without the need to pass \`conditions\`</span> | |
| <span class="hljs-meta">>>> </span>video = pipe( | |
| <span class="hljs-meta">... </span> conditions=[condition1, condition2], | |
| <span class="hljs-meta">... </span> prompt=prompt, | |
| <span class="hljs-meta">... </span> negative_prompt=negative_prompt, | |
| <span class="hljs-meta">... </span> width=<span class="hljs-number">768</span>, | |
| <span class="hljs-meta">... </span> height=<span class="hljs-number">512</span>, | |
| <span class="hljs-meta">... </span> num_frames=<span class="hljs-number">161</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">40</span>, | |
| <span class="hljs-meta">... </span> generator=generator, | |
| <span class="hljs-meta">... </span>).frames[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">24</span>)`,wrap:!1}}),{c(){r=l("p"),r.textContent=J,u=s(),h(c.$$.fragment)},l(a){r=i(a,"P",{"data-svelte-h":!0}),T(r)!=="svelte-kvfsh7"&&(r.textContent=J),u=o(a),g(c.$$.fragment,a)},m(a,b){m(a,r,b),m(a,u,b),f(c,a,b),p=!0},p:Bt,i(a){p||(M(c.$$.fragment,a),p=!0)},o(a){y(c.$$.fragment,a),p=!1},d(a){a&&(n(r),n(u)),_(c,a)}}}function ro(G){let r,J,u,c,p,a='<div class="flex flex-wrap space-x-1"><a href="https://huggingface.co/docs/diffusers/main/en/tutorials/using_peft_for_inference" target="_blank" rel="noopener"><img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/></a> <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white%22"/></div>',b,X,U,C,cs='<a href="https://huggingface.co/Lightricks/LTX-Video" rel="nofollow">LTX-Video</a> is a diffusion transformer designed for fast and real-time generation of high-resolution videos from text and images. The main feature of LTX-Video is the Video-VAE. The Video-VAE has a higher pixel to latent compression ratio (1:192) which enables more efficient video data processing and faster generation speed. To support and prevent finer details from being lost during generation, the Video-VAE decoder performs the latent to pixel conversion <em>and</em> the last denoising step.',Wt,re,ms='You can find all the original LTX-Video checkpoints under the <a href="https://huggingface.co/Lightricks" rel="nofollow">Lightricks</a> organization.',kt,S,Vt,de,us="The example below demonstrates how to generate a video optimized for memory or inference speed.",Ct,P,Rt,pe,Lt,W,qe,hs='<p>Refer to the following recommended settings for generation from the <a href="https://github.com/Lightricks/LTX-Video" rel="nofollow">LTX-Video</a> repository.</p> <ul><li>The recommended dtype for the transformer, VAE, and text encoder is <code>torch.bfloat16</code>. The VAE and text encoder can also be <code>torch.float32</code> or <code>torch.float16</code>.</li> <li>For guidance-distilled variants of LTX-Video, set <code>guidance_scale</code> to <code>1.0</code>. The <code>guidance_scale</code> for any other model should be set higher, like <code>5.0</code>, for good generation quality.</li> <li>For timestep-aware VAE variants (LTX-Video 0.9.1 and above), set <code>decode_timestep</code> to <code>0.05</code> and <code>image_cond_noise_scale</code> to <code>0.025</code>.</li> <li>For variants that support interpolation between multiple conditioning images and videos (LTX-Video 0.9.5 and above), use similar images and videos for the best results. Divergence from the conditioning inputs may lead to abrupt transitionts in the generated video.</li></ul>',hn,ce,Oe,gs="LTX-Video 0.9.7 includes a spatial latent upscaler and a 13B parameter transformer. During inference, a low resolution video is quickly generated first and then upscaled and refined.",gn,me,Ke,fs="Show example code",fn,ue,Mn,E,et,Ms="LTX-Video 0.9.7 distilled model is guidance and timestep-distilled to speedup generation. It requires <code>guidance_scale</code> to be set to <code>1.0</code> and <code>num_inference_steps</code> should be set between <code>4</code> and <code>10</code> for good generation quality. You should also use the following custom timesteps for the best results.",yn,tt,ys="<li>Base model inference to prepare for upscaling: <code>[1000, 993, 987, 981, 975, 909, 725, 0.03]</code>.</li> <li>Upscaling: <code>[1000, 909, 725, 421, 0]</code>.</li>",_n,he,nt,_s="Show example code",Tn,ge,bn,fe,st,Ts='LTX-Video supports LoRAs with <a href="/docs/diffusers/pr_12067/en/api/loaders/lora#diffusers.loaders.LTXVideoLoraLoaderMixin.load_lora_weights">load_lora_weights()</a>.',wn,Me,ot,bs="Show example code",Jn,ye,Un,_e,at,ws='LTX-Video supports loading from single files, such as <a href="../../quantization/gguf">GGUF checkpoints</a>, with <a href="/docs/diffusers/pr_12067/en/api/loaders/single_file#diffusers.loaders.FromOriginalModelMixin.from_single_file">loaders.FromOriginalModelMixin.from_single_file()</a> or <a href="/docs/diffusers/pr_12067/en/api/loaders/single_file#diffusers.loaders.FromSingleFileMixin.from_single_file">loaders.FromSingleFileMixin.from_single_file()</a>.',jn,Te,lt,Js="Show example code",vn,be,Nt,we,Yt,B,Je,Zn,it,Us="Pipeline for text-to-video generation.",In,rt,js='Reference: <a href="https://github.com/Lightricks/LTX-Video" rel="nofollow">https://github.com/Lightricks/LTX-Video</a>',Gn,L,Ue,Xn,dt,vs="Function invoked when calling the pipeline for generation.",Bn,A,xn,D,je,Wn,pt,Zs="Encodes the prompt into text encoder hidden states.",Ft,ve,Ht,x,Ze,kn,ct,Is="Pipeline for image-to-video generation.",Vn,mt,Gs='Reference: <a href="https://github.com/Lightricks/LTX-Video" rel="nofollow">https://github.com/Lightricks/LTX-Video</a>',Cn,N,Ie,Rn,ut,Xs="Function invoked when calling the pipeline for generation.",Ln,q,Nn,O,Ge,Yn,ht,Bs="Encodes the prompt into text encoder hidden states.",Et,Xe,Qt,v,Be,Fn,gt,xs="Pipeline for text/image/video-to-video generation.",Hn,ft,Ws='Reference: <a href="https://github.com/Lightricks/LTX-Video" rel="nofollow">https://github.com/Lightricks/LTX-Video</a>',En,Y,xe,Qn,Mt,ks="Function invoked when calling the pipeline for generation.",$n,K,zn,ee,We,Sn,yt,Vs=`Add timestep-dependent noise to the hard-conditioning latents. This helps with motion continuity, especially | |
| when conditioned on a single frame.`,Pn,te,ke,An,_t,Cs="Encodes the prompt into text encoder hidden states.",Dn,ne,Ve,qn,Tt,Rs="Trim a conditioning sequence to the allowed number of frames.",$t,Ce,zt,Z,Re,On,bt,Le,Kn,se,Ne,es,wt,Ls=`Applies Adaptive Instance Normalization (AdaIN) to a latent tensor based on statistics from a reference latent | |
| tensor.`,ts,oe,Ye,ns,Jt,Ns=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,ss,ae,Fe,os,Ut,Ys=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,as,le,He,ls,jt,Fs=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,is,ie,Ee,rs,vt,Hs=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,St,Qe,Pt,Q,$e,ds,Zt,Es="Output class for LTX pipelines.",At,ze,Dt,xt,qt;return X=new De({props:{title:"LTX-Video",local:"ltx-video",headingTag:"h1"}}),S=new Os({props:{warning:!1,$$slots:{default:[to]},$$scope:{ctx:G}}}),P=new eo({props:{id:"usage",options:["memory","inference speed"],$$slots:{default:[oo]},$$scope:{ctx:G}}}),pe=new De({props:{title:"Notes",local:"notes",headingTag:"h2"}}),ue=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTFRYQ29uZGl0aW9uUGlwZWxpbmUlMkMlMjBMVFhMYXRlbnRVcHNhbXBsZVBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy5waXBlbGluZXMubHR4LnBpcGVsaW5lX2x0eF9jb25kaXRpb24lMjBpbXBvcnQlMjBMVFhWaWRlb0NvbmRpdGlvbiUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBleHBvcnRfdG9fdmlkZW8lMkMlMjBsb2FkX3ZpZGVvJTBBJTBBcGlwZWxpbmUlMjAlM0QlMjBMVFhDb25kaXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyTGlnaHRyaWNrcyUyRkxUWC1WaWRlby0wLjkuNy1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQXBpcGVsaW5lX3Vwc2FtcGxlJTIwJTNEJTIwTFRYTGF0ZW50VXBzYW1wbGVQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyTGlnaHRyaWNrcyUyRmx0eHYtc3BhdGlhbC11cHNjYWxlci0wLjkuNyUyMiUyQyUyMHZhZSUzRHBpcGVsaW5lLnZhZSUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZWxpbmUudG8oJTIyY3VkYSUyMiklMEFwaXBlX3Vwc2FtcGxlLnRvKCUyMmN1ZGElMjIpJTBBcGlwZWxpbmUudmFlLmVuYWJsZV90aWxpbmcoKSUwQSUwQWRlZiUyMHJvdW5kX3RvX25lYXJlc3RfcmVzb2x1dGlvbl9hY2NlcHRhYmxlX2J5X3ZhZShoZWlnaHQlMkMlMjB3aWR0aCklM0ElMEElMjAlMjAlMjAlMjBoZWlnaHQlMjAlM0QlMjBoZWlnaHQlMjAtJTIwKGhlaWdodCUyMCUyNSUyMHBpcGVsaW5lLnZhZV90ZW1wb3JhbF9jb21wcmVzc2lvbl9yYXRpbyklMEElMjAlMjAlMjAlMjB3aWR0aCUyMCUzRCUyMHdpZHRoJTIwLSUyMCh3aWR0aCUyMCUyNSUyMHBpcGVsaW5lLnZhZV90ZW1wb3JhbF9jb21wcmVzc2lvbl9yYXRpbyklMEElMjAlMjAlMjAlMjByZXR1cm4lMjBoZWlnaHQlMkMlMjB3aWR0aCUwQSUwQXZpZGVvJTIwJTNEJTIwbG9hZF92aWRlbyglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZjb3Ntb3MlMkZjb3Ntb3MtdmlkZW8yd29ybGQtaW5wdXQtdmlkLm1wNCUyMiUwQSklNUIlM0EyMSU1RCUyMCUyMCUyMyUyMG9ubHklMjB1c2UlMjB0aGUlMjBmaXJzdCUyMDIxJTIwZnJhbWVzJTIwYXMlMjBjb25kaXRpb25pbmclMEFjb25kaXRpb24xJTIwJTNEJTIwTFRYVmlkZW9Db25kaXRpb24odmlkZW8lM0R2aWRlbyUyQyUyMGZyYW1lX2luZGV4JTNEMCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjIlMjIlMjIlMEFUaGUlMjB2aWRlbyUyMGRlcGljdHMlMjBhJTIwd2luZGluZyUyMG1vdW50YWluJTIwcm9hZCUyMGNvdmVyZWQlMjBpbiUyMHNub3clMkMlMjB3aXRoJTIwYSUyMHNpbmdsZSUyMHZlaGljbGUlMjAlMEF0cmF2ZWxpbmclMjBhbG9uZyUyMGl0LiUyMFRoZSUyMHJvYWQlMjBpcyUyMGZsYW5rZWQlMjBieSUyMHN0ZWVwJTJDJTIwcm9ja3klMjBjbGlmZnMlMjBhbmQlMjBzcGFyc2UlMjB2ZWdldGF0aW9uLiUyMCUwQVRoZSUyMGxhbmRzY2FwZSUyMGlzJTIwY2hhcmFjdGVyaXplZCUyMGJ5JTIwcnVnZ2VkJTIwdGVycmFpbiUyMGFuZCUyMGElMjByaXZlciUyMHZpc2libGUlMjBpbiUyMHRoZSUyMGRpc3RhbmNlLiUyMCUwQVRoZSUyMHNjZW5lJTIwY2FwdHVyZXMlMjB0aGUlMjBzb2xpdHVkZSUyMGFuZCUyMGJlYXV0eSUyMG9mJTIwYSUyMHdpbnRlciUyMGRyaXZlJTIwdGhyb3VnaCUyMGElMjBtb3VudGFpbm91cyUyMHJlZ2lvbi4lMEElMjIlMjIlMjIlMEFuZWdhdGl2ZV9wcm9tcHQlMjAlM0QlMjAlMjJ3b3JzdCUyMHF1YWxpdHklMkMlMjBpbmNvbnNpc3RlbnQlMjBtb3Rpb24lMkMlMjBibHVycnklMkMlMjBqaXR0ZXJ5JTJDJTIwZGlzdG9ydGVkJTIyJTBBZXhwZWN0ZWRfaGVpZ2h0JTJDJTIwZXhwZWN0ZWRfd2lkdGglMjAlM0QlMjA3NjglMkMlMjAxMTUyJTBBZG93bnNjYWxlX2ZhY3RvciUyMCUzRCUyMDIlMjAlMkYlMjAzJTBBbnVtX2ZyYW1lcyUyMCUzRCUyMDE2MSUwQSUwQSUyMyUyMDEuJTIwR2VuZXJhdGUlMjB2aWRlbyUyMGF0JTIwc21hbGxlciUyMHJlc29sdXRpb24lMEElMjMlMjBUZXh0LW9ubHklMjBjb25kaXRpb25pbmclMjBpcyUyMGFsc28lMjBzdXBwb3J0ZWQlMjB3aXRob3V0JTIwdGhlJTIwbmVlZCUyMHRvJTIwcGFzcyUyMCU2MGNvbmRpdGlvbnMlNjAlMEFkb3duc2NhbGVkX2hlaWdodCUyQyUyMGRvd25zY2FsZWRfd2lkdGglMjAlM0QlMjBpbnQoZXhwZWN0ZWRfaGVpZ2h0JTIwKiUyMGRvd25zY2FsZV9mYWN0b3IpJTJDJTIwaW50KGV4cGVjdGVkX3dpZHRoJTIwKiUyMGRvd25zY2FsZV9mYWN0b3IpJTBBZG93bnNjYWxlZF9oZWlnaHQlMkMlMjBkb3duc2NhbGVkX3dpZHRoJTIwJTNEJTIwcm91bmRfdG9fbmVhcmVzdF9yZXNvbHV0aW9uX2FjY2VwdGFibGVfYnlfdmFlKGRvd25zY2FsZWRfaGVpZ2h0JTJDJTIwZG93bnNjYWxlZF93aWR0aCklMEFsYXRlbnRzJTIwJTNEJTIwcGlwZWxpbmUoJTBBJTIwJTIwJTIwJTIwY29uZGl0aW9ucyUzRCU1QmNvbmRpdGlvbjElNUQlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRGRvd25zY2FsZWRfd2lkdGglMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0Rkb3duc2NhbGVkX2hlaWdodCUyQyUwQSUyMCUyMCUyMCUyMG51bV9mcmFtZXMlM0RudW1fZnJhbWVzJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDMwJTJDJTBBJTIwJTIwJTIwJTIwZGVjb2RlX3RpbWVzdGVwJTNEMC4wNSUyQyUwQSUyMCUyMCUyMCUyMGRlY29kZV9ub2lzZV9zY2FsZSUzRDAuMDI1JTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2VfY29uZF9ub2lzZV9zY2FsZSUzRDAuMCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNENS4wJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2VfcmVzY2FsZSUzRDAuNyUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDApJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJsYXRlbnQlMjIlMkMlMEEpLmZyYW1lcyUwQSUwQSUyMyUyMDIuJTIwVXBzY2FsZSUyMGdlbmVyYXRlZCUyMHZpZGVvJTIwdXNpbmclMjBsYXRlbnQlMjB1cHNhbXBsZXIlMjB3aXRoJTIwZmV3ZXIlMjBpbmZlcmVuY2UlMjBzdGVwcyUwQSUyMyUyMFRoZSUyMGF2YWlsYWJsZSUyMGxhdGVudCUyMHVwc2FtcGxlciUyMHVwc2NhbGVzJTIwdGhlJTIwaGVpZ2h0JTJGd2lkdGglMjBieSUyMDJ4JTBBdXBzY2FsZWRfaGVpZ2h0JTJDJTIwdXBzY2FsZWRfd2lkdGglMjAlM0QlMjBkb3duc2NhbGVkX2hlaWdodCUyMColMjAyJTJDJTIwZG93bnNjYWxlZF93aWR0aCUyMColMjAyJTBBdXBzY2FsZWRfbGF0ZW50cyUyMCUzRCUyMHBpcGVfdXBzYW1wbGUoJTBBJTIwJTIwJTIwJTIwbGF0ZW50cyUzRGxhdGVudHMlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMmxhdGVudCUyMiUwQSkuZnJhbWVzJTBBJTBBJTIzJTIwMy4lMjBEZW5vaXNlJTIwdGhlJTIwdXBzY2FsZWQlMjB2aWRlbyUyMHdpdGglMjBmZXclMjBzdGVwcyUyMHRvJTIwaW1wcm92ZSUyMHRleHR1cmUlMjAob3B0aW9uYWwlMkMlMjBidXQlMjByZWNvbW1lbmRlZCklMEF2aWRlbyUyMCUzRCUyMHBpcGVsaW5lKCUwQSUyMCUyMCUyMCUyMGNvbmRpdGlvbnMlM0QlNUJjb25kaXRpb24xJTVEJTJDJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0R1cHNjYWxlZF93aWR0aCUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRHVwc2NhbGVkX2hlaWdodCUyQyUwQSUyMCUyMCUyMCUyMG51bV9mcmFtZXMlM0RudW1fZnJhbWVzJTJDJTBBJTIwJTIwJTIwJTIwZGVub2lzZV9zdHJlbmd0aCUzRDAuNCUyQyUyMCUyMCUyMyUyMEVmZmVjdGl2ZWx5JTJDJTIwNCUyMGluZmVyZW5jZSUyMHN0ZXBzJTIwb3V0JTIwb2YlMjAxMCUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QxMCUyQyUwQSUyMCUyMCUyMCUyMGxhdGVudHMlM0R1cHNjYWxlZF9sYXRlbnRzJTJDJTBBJTIwJTIwJTIwJTIwZGVjb2RlX3RpbWVzdGVwJTNEMC4wNSUyQyUwQSUyMCUyMCUyMCUyMGRlY29kZV9ub2lzZV9zY2FsZSUzRDAuMDI1JTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2VfY29uZF9ub2lzZV9zY2FsZSUzRDAuMCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNENS4wJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2VfcmVzY2FsZSUzRDAuNyUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDApJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJwaWwlMjIlMkMlMEEpLmZyYW1lcyU1QjAlNUQlMEElMEElMjMlMjA0LiUyMERvd25zY2FsZSUyMHRoZSUyMHZpZGVvJTIwdG8lMjB0aGUlMjBleHBlY3RlZCUyMHJlc29sdXRpb24lMEF2aWRlbyUyMCUzRCUyMCU1QmZyYW1lLnJlc2l6ZSgoZXhwZWN0ZWRfd2lkdGglMkMlMjBleHBlY3RlZF9oZWlnaHQpKSUyMGZvciUyMGZyYW1lJTIwaW4lMjB2aWRlbyU1RCUwQSUwQWV4cG9ydF90b192aWRlbyh2aWRlbyUyQyUyMCUyMm91dHB1dC5tcDQlMjIlMkMlMjBmcHMlM0QyNCk=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXConditionPipeline, LTXLatentUpsamplePipeline | |
| <span class="hljs-keyword">from</span> diffusers.pipelines.ltx.pipeline_ltx_condition <span class="hljs-keyword">import</span> LTXVideoCondition | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video, load_video | |
| pipeline = LTXConditionPipeline.from_pretrained(<span class="hljs-string">"Lightricks/LTX-Video-0.9.7-dev"</span>, torch_dtype=torch.bfloat16) | |
| pipeline_upsample = LTXLatentUpsamplePipeline.from_pretrained(<span class="hljs-string">"Lightricks/ltxv-spatial-upscaler-0.9.7"</span>, vae=pipeline.vae, torch_dtype=torch.bfloat16) | |
| pipeline.to(<span class="hljs-string">"cuda"</span>) | |
| pipe_upsample.to(<span class="hljs-string">"cuda"</span>) | |
| pipeline.vae.enable_tiling() | |
| <span class="hljs-keyword">def</span> <span class="hljs-title function_">round_to_nearest_resolution_acceptable_by_vae</span>(<span class="hljs-params">height, width</span>): | |
| height = height - (height % pipeline.vae_temporal_compression_ratio) | |
| width = width - (width % pipeline.vae_temporal_compression_ratio) | |
| <span class="hljs-keyword">return</span> height, width | |
| video = load_video( | |
| <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input-vid.mp4"</span> | |
| )[:<span class="hljs-number">21</span>] <span class="hljs-comment"># only use the first 21 frames as conditioning</span> | |
| condition1 = LTXVideoCondition(video=video, frame_index=<span class="hljs-number">0</span>) | |
| prompt = <span class="hljs-string">""" | |
| The video depicts a winding mountain road covered in snow, with a single vehicle | |
| traveling along it. The road is flanked by steep, rocky cliffs and sparse vegetation. | |
| The landscape is characterized by rugged terrain and a river visible in the distance. | |
| The scene captures the solitude and beauty of a winter drive through a mountainous region. | |
| """</span> | |
| negative_prompt = <span class="hljs-string">"worst quality, inconsistent motion, blurry, jittery, distorted"</span> | |
| expected_height, expected_width = <span class="hljs-number">768</span>, <span class="hljs-number">1152</span> | |
| downscale_factor = <span class="hljs-number">2</span> / <span class="hljs-number">3</span> | |
| num_frames = <span class="hljs-number">161</span> | |
| <span class="hljs-comment"># 1. Generate video at smaller resolution</span> | |
| <span class="hljs-comment"># Text-only conditioning is also supported without the need to pass \`conditions\`</span> | |
| downscaled_height, downscaled_width = <span class="hljs-built_in">int</span>(expected_height * downscale_factor), <span class="hljs-built_in">int</span>(expected_width * downscale_factor) | |
| downscaled_height, downscaled_width = round_to_nearest_resolution_acceptable_by_vae(downscaled_height, downscaled_width) | |
| latents = pipeline( | |
| conditions=[condition1], | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=downscaled_width, | |
| height=downscaled_height, | |
| num_frames=num_frames, | |
| num_inference_steps=<span class="hljs-number">30</span>, | |
| decode_timestep=<span class="hljs-number">0.05</span>, | |
| decode_noise_scale=<span class="hljs-number">0.025</span>, | |
| image_cond_noise_scale=<span class="hljs-number">0.0</span>, | |
| guidance_scale=<span class="hljs-number">5.0</span>, | |
| guidance_rescale=<span class="hljs-number">0.7</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">0</span>), | |
| output_type=<span class="hljs-string">"latent"</span>, | |
| ).frames | |
| <span class="hljs-comment"># 2. Upscale generated video using latent upsampler with fewer inference steps</span> | |
| <span class="hljs-comment"># The available latent upsampler upscales the height/width by 2x</span> | |
| upscaled_height, upscaled_width = downscaled_height * <span class="hljs-number">2</span>, downscaled_width * <span class="hljs-number">2</span> | |
| upscaled_latents = pipe_upsample( | |
| latents=latents, | |
| output_type=<span class="hljs-string">"latent"</span> | |
| ).frames | |
| <span class="hljs-comment"># 3. Denoise the upscaled video with few steps to improve texture (optional, but recommended)</span> | |
| video = pipeline( | |
| conditions=[condition1], | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=upscaled_width, | |
| height=upscaled_height, | |
| num_frames=num_frames, | |
| denoise_strength=<span class="hljs-number">0.4</span>, <span class="hljs-comment"># Effectively, 4 inference steps out of 10</span> | |
| num_inference_steps=<span class="hljs-number">10</span>, | |
| latents=upscaled_latents, | |
| decode_timestep=<span class="hljs-number">0.05</span>, | |
| decode_noise_scale=<span class="hljs-number">0.025</span>, | |
| image_cond_noise_scale=<span class="hljs-number">0.0</span>, | |
| guidance_scale=<span class="hljs-number">5.0</span>, | |
| guidance_rescale=<span class="hljs-number">0.7</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">0</span>), | |
| output_type=<span class="hljs-string">"pil"</span>, | |
| ).frames[<span class="hljs-number">0</span>] | |
| <span class="hljs-comment"># 4. Downscale the video to the expected resolution</span> | |
| video = [frame.resize((expected_width, expected_height)) <span class="hljs-keyword">for</span> frame <span class="hljs-keyword">in</span> video] | |
| export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">24</span>)`,wrap:!1}}),ge=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTFRYQ29uZGl0aW9uUGlwZWxpbmUlMkMlMjBMVFhMYXRlbnRVcHNhbXBsZVBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy5waXBlbGluZXMubHR4LnBpcGVsaW5lX2x0eF9jb25kaXRpb24lMjBpbXBvcnQlMjBMVFhWaWRlb0NvbmRpdGlvbiUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBleHBvcnRfdG9fdmlkZW8lMkMlMjBsb2FkX3ZpZGVvJTBBJTBBcGlwZWxpbmUlMjAlM0QlMjBMVFhDb25kaXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyTGlnaHRyaWNrcyUyRkxUWC1WaWRlby0wLjkuNy1kaXN0aWxsZWQlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQXBpcGVfdXBzYW1wbGUlMjAlM0QlMjBMVFhMYXRlbnRVcHNhbXBsZVBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJMaWdodHJpY2tzJTJGbHR4di1zcGF0aWFsLXVwc2NhbGVyLTAuOS43JTIyJTJDJTIwdmFlJTNEcGlwZWxpbmUudmFlJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlbGluZS50byglMjJjdWRhJTIyKSUwQXBpcGVfdXBzYW1wbGUudG8oJTIyY3VkYSUyMiklMEFwaXBlbGluZS52YWUuZW5hYmxlX3RpbGluZygpJTBBJTBBZGVmJTIwcm91bmRfdG9fbmVhcmVzdF9yZXNvbHV0aW9uX2FjY2VwdGFibGVfYnlfdmFlKGhlaWdodCUyQyUyMHdpZHRoKSUzQSUwQSUyMCUyMCUyMCUyMGhlaWdodCUyMCUzRCUyMGhlaWdodCUyMC0lMjAoaGVpZ2h0JTIwJTI1JTIwcGlwZWxpbmUudmFlX3RlbXBvcmFsX2NvbXByZXNzaW9uX3JhdGlvKSUwQSUyMCUyMCUyMCUyMHdpZHRoJTIwJTNEJTIwd2lkdGglMjAtJTIwKHdpZHRoJTIwJTI1JTIwcGlwZWxpbmUudmFlX3RlbXBvcmFsX2NvbXByZXNzaW9uX3JhdGlvKSUwQSUyMCUyMCUyMCUyMHJldHVybiUyMGhlaWdodCUyQyUyMHdpZHRoJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyJTIyJTIyJTBBYXJ0aXN0aWMlMjBhbmF0b21pY2FsJTIwM2QlMjByZW5kZXIlMkMlMjB1dGxyYSUyMHF1YWxpdHklMkMlMjBodW1hbiUyMGhhbGYlMjBmdWxsJTIwbWFsZSUyMGJvZHklMjB3aXRoJTIwdHJhbnNwYXJlbnQlMjAlMEFza2luJTIwcmV2ZWFsaW5nJTIwc3RydWN0dXJlJTIwaW5zdGVhZCUyMG9mJTIwb3JnYW5zJTJDJTIwbXVzY3VsYXIlMkMlMjBpbnRyaWNhdGUlMjBjcmVhdGl2ZSUyMHBhdHRlcm5zJTJDJTIwJTBBbW9ub2Nocm9tYXRpYyUyMHdpdGglMjBiYWNrbGlnaHRpbmclMkMlMjBsaWdodG5pbmclMjBtZXNoJTJDJTIwc2NpZW50aWZpYyUyMGNvbmNlcHQlMjBhcnQlMkMlMjBibGVuZGluZyUyMGJpb2xvZ3klMjAlMEF3aXRoJTIwYm90YW55JTJDJTIwc3VycmVhbCUyMGFuZCUyMGV0aGVyZWFsJTIwcXVhbGl0eSUyQyUyMHVucmVhbCUyMGVuZ2luZSUyMDUlMkMlMjByYXklMjB0cmFjaW5nJTJDJTIwdWx0cmElMjByZWFsaXN0aWMlMkMlMjAlMEExNkslMjBVSEQlMkMlMjByaWNoJTIwZGV0YWlscy4lMjBjYW1lcmElMjB6b29tcyUyMG91dCUyMGluJTIwYSUyMHJvdGF0aW5nJTIwZmFzaGlvbiUwQSUyMiUyMiUyMiUwQW5lZ2F0aXZlX3Byb21wdCUyMCUzRCUyMCUyMndvcnN0JTIwcXVhbGl0eSUyQyUyMGluY29uc2lzdGVudCUyMG1vdGlvbiUyQyUyMGJsdXJyeSUyQyUyMGppdHRlcnklMkMlMjBkaXN0b3J0ZWQlMjIlMEFleHBlY3RlZF9oZWlnaHQlMkMlMjBleHBlY3RlZF93aWR0aCUyMCUzRCUyMDc2OCUyQyUyMDExNTIlMEFkb3duc2NhbGVfZmFjdG9yJTIwJTNEJTIwMiUyMCUyRiUyMDMlMEFudW1fZnJhbWVzJTIwJTNEJTIwMTYxJTBBJTBBJTIzJTIwMS4lMjBHZW5lcmF0ZSUyMHZpZGVvJTIwYXQlMjBzbWFsbGVyJTIwcmVzb2x1dGlvbiUwQWRvd25zY2FsZWRfaGVpZ2h0JTJDJTIwZG93bnNjYWxlZF93aWR0aCUyMCUzRCUyMGludChleHBlY3RlZF9oZWlnaHQlMjAqJTIwZG93bnNjYWxlX2ZhY3RvciklMkMlMjBpbnQoZXhwZWN0ZWRfd2lkdGglMjAqJTIwZG93bnNjYWxlX2ZhY3RvciklMEFkb3duc2NhbGVkX2hlaWdodCUyQyUyMGRvd25zY2FsZWRfd2lkdGglMjAlM0QlMjByb3VuZF90b19uZWFyZXN0X3Jlc29sdXRpb25fYWNjZXB0YWJsZV9ieV92YWUoZG93bnNjYWxlZF9oZWlnaHQlMkMlMjBkb3duc2NhbGVkX3dpZHRoKSUwQWxhdGVudHMlMjAlM0QlMjBwaXBlbGluZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRGRvd25zY2FsZWRfd2lkdGglMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0Rkb3duc2NhbGVkX2hlaWdodCUyQyUwQSUyMCUyMCUyMCUyMG51bV9mcmFtZXMlM0RudW1fZnJhbWVzJTJDJTBBJTIwJTIwJTIwJTIwdGltZXN0ZXBzJTNEJTVCMTAwMCUyQyUyMDk5MyUyQyUyMDk4NyUyQyUyMDk4MSUyQyUyMDk3NSUyQyUyMDkwOSUyQyUyMDcyNSUyQyUyMDAuMDMlNUQlMkMlMEElMjAlMjAlMjAlMjBkZWNvZGVfdGltZXN0ZXAlM0QwLjA1JTJDJTBBJTIwJTIwJTIwJTIwZGVjb2RlX25vaXNlX3NjYWxlJTNEMC4wMjUlMkMlMEElMjAlMjAlMjAlMjBpbWFnZV9jb25kX25vaXNlX3NjYWxlJTNEMC4wJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QxLjAlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9yZXNjYWxlJTNEMC43JTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoMCklMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMmxhdGVudCUyMiUyQyUwQSkuZnJhbWVzJTBBJTBBJTIzJTIwMi4lMjBVcHNjYWxlJTIwZ2VuZXJhdGVkJTIwdmlkZW8lMjB1c2luZyUyMGxhdGVudCUyMHVwc2FtcGxlciUyMHdpdGglMjBmZXdlciUyMGluZmVyZW5jZSUyMHN0ZXBzJTBBJTIzJTIwVGhlJTIwYXZhaWxhYmxlJTIwbGF0ZW50JTIwdXBzYW1wbGVyJTIwdXBzY2FsZXMlMjB0aGUlMjBoZWlnaHQlMkZ3aWR0aCUyMGJ5JTIwMnglMEF1cHNjYWxlZF9oZWlnaHQlMkMlMjB1cHNjYWxlZF93aWR0aCUyMCUzRCUyMGRvd25zY2FsZWRfaGVpZ2h0JTIwKiUyMDIlMkMlMjBkb3duc2NhbGVkX3dpZHRoJTIwKiUyMDIlMEF1cHNjYWxlZF9sYXRlbnRzJTIwJTNEJTIwcGlwZV91cHNhbXBsZSglMEElMjAlMjAlMjAlMjBsYXRlbnRzJTNEbGF0ZW50cyUyQyUwQSUyMCUyMCUyMCUyMGFkYWluX2ZhY3RvciUzRDEuMCUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIybGF0ZW50JTIyJTBBKS5mcmFtZXMlMEElMEElMjMlMjAzLiUyMERlbm9pc2UlMjB0aGUlMjB1cHNjYWxlZCUyMHZpZGVvJTIwd2l0aCUyMGZldyUyMHN0ZXBzJTIwdG8lMjBpbXByb3ZlJTIwdGV4dHVyZSUyMChvcHRpb25hbCUyQyUyMGJ1dCUyMHJlY29tbWVuZGVkKSUwQXZpZGVvJTIwJTNEJTIwcGlwZWxpbmUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0R1cHNjYWxlZF93aWR0aCUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRHVwc2NhbGVkX2hlaWdodCUyQyUwQSUyMCUyMCUyMCUyMG51bV9mcmFtZXMlM0RudW1fZnJhbWVzJTJDJTBBJTIwJTIwJTIwJTIwZGVub2lzZV9zdHJlbmd0aCUzRDAuOTk5JTJDJTIwJTIwJTIzJTIwRWZmZWN0aXZlbHklMkMlMjA0JTIwaW5mZXJlbmNlJTIwc3RlcHMlMjBvdXQlMjBvZiUyMDUlMEElMjAlMjAlMjAlMjB0aW1lc3RlcHMlM0QlNUIxMDAwJTJDJTIwOTA5JTJDJTIwNzI1JTJDJTIwNDIxJTJDJTIwMCU1RCUyQyUwQSUyMCUyMCUyMCUyMGxhdGVudHMlM0R1cHNjYWxlZF9sYXRlbnRzJTJDJTBBJTIwJTIwJTIwJTIwZGVjb2RlX3RpbWVzdGVwJTNEMC4wNSUyQyUwQSUyMCUyMCUyMCUyMGRlY29kZV9ub2lzZV9zY2FsZSUzRDAuMDI1JTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2VfY29uZF9ub2lzZV9zY2FsZSUzRDAuMCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMS4wJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2VfcmVzY2FsZSUzRDAuNyUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDApJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJwaWwlMjIlMkMlMEEpLmZyYW1lcyU1QjAlNUQlMEElMEElMjMlMjA0LiUyMERvd25zY2FsZSUyMHRoZSUyMHZpZGVvJTIwdG8lMjB0aGUlMjBleHBlY3RlZCUyMHJlc29sdXRpb24lMEF2aWRlbyUyMCUzRCUyMCU1QmZyYW1lLnJlc2l6ZSgoZXhwZWN0ZWRfd2lkdGglMkMlMjBleHBlY3RlZF9oZWlnaHQpKSUyMGZvciUyMGZyYW1lJTIwaW4lMjB2aWRlbyU1RCUwQSUwQWV4cG9ydF90b192aWRlbyh2aWRlbyUyQyUyMCUyMm91dHB1dC5tcDQlMjIlMkMlMjBmcHMlM0QyNCk=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXConditionPipeline, LTXLatentUpsamplePipeline | |
| <span class="hljs-keyword">from</span> diffusers.pipelines.ltx.pipeline_ltx_condition <span class="hljs-keyword">import</span> LTXVideoCondition | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video, load_video | |
| pipeline = LTXConditionPipeline.from_pretrained(<span class="hljs-string">"Lightricks/LTX-Video-0.9.7-distilled"</span>, torch_dtype=torch.bfloat16) | |
| pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained(<span class="hljs-string">"Lightricks/ltxv-spatial-upscaler-0.9.7"</span>, vae=pipeline.vae, torch_dtype=torch.bfloat16) | |
| pipeline.to(<span class="hljs-string">"cuda"</span>) | |
| pipe_upsample.to(<span class="hljs-string">"cuda"</span>) | |
| pipeline.vae.enable_tiling() | |
| <span class="hljs-keyword">def</span> <span class="hljs-title function_">round_to_nearest_resolution_acceptable_by_vae</span>(<span class="hljs-params">height, width</span>): | |
| height = height - (height % pipeline.vae_temporal_compression_ratio) | |
| width = width - (width % pipeline.vae_temporal_compression_ratio) | |
| <span class="hljs-keyword">return</span> height, width | |
| prompt = <span class="hljs-string">""" | |
| artistic anatomical 3d render, utlra quality, human half full male body with transparent | |
| skin revealing structure instead of organs, muscular, intricate creative patterns, | |
| monochromatic with backlighting, lightning mesh, scientific concept art, blending biology | |
| with botany, surreal and ethereal quality, unreal engine 5, ray tracing, ultra realistic, | |
| 16K UHD, rich details. camera zooms out in a rotating fashion | |
| """</span> | |
| negative_prompt = <span class="hljs-string">"worst quality, inconsistent motion, blurry, jittery, distorted"</span> | |
| expected_height, expected_width = <span class="hljs-number">768</span>, <span class="hljs-number">1152</span> | |
| downscale_factor = <span class="hljs-number">2</span> / <span class="hljs-number">3</span> | |
| num_frames = <span class="hljs-number">161</span> | |
| <span class="hljs-comment"># 1. Generate video at smaller resolution</span> | |
| downscaled_height, downscaled_width = <span class="hljs-built_in">int</span>(expected_height * downscale_factor), <span class="hljs-built_in">int</span>(expected_width * downscale_factor) | |
| downscaled_height, downscaled_width = round_to_nearest_resolution_acceptable_by_vae(downscaled_height, downscaled_width) | |
| latents = pipeline( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=downscaled_width, | |
| height=downscaled_height, | |
| num_frames=num_frames, | |
| timesteps=[<span class="hljs-number">1000</span>, <span class="hljs-number">993</span>, <span class="hljs-number">987</span>, <span class="hljs-number">981</span>, <span class="hljs-number">975</span>, <span class="hljs-number">909</span>, <span class="hljs-number">725</span>, <span class="hljs-number">0.03</span>], | |
| decode_timestep=<span class="hljs-number">0.05</span>, | |
| decode_noise_scale=<span class="hljs-number">0.025</span>, | |
| image_cond_noise_scale=<span class="hljs-number">0.0</span>, | |
| guidance_scale=<span class="hljs-number">1.0</span>, | |
| guidance_rescale=<span class="hljs-number">0.7</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">0</span>), | |
| output_type=<span class="hljs-string">"latent"</span>, | |
| ).frames | |
| <span class="hljs-comment"># 2. Upscale generated video using latent upsampler with fewer inference steps</span> | |
| <span class="hljs-comment"># The available latent upsampler upscales the height/width by 2x</span> | |
| upscaled_height, upscaled_width = downscaled_height * <span class="hljs-number">2</span>, downscaled_width * <span class="hljs-number">2</span> | |
| upscaled_latents = pipe_upsample( | |
| latents=latents, | |
| adain_factor=<span class="hljs-number">1.0</span>, | |
| output_type=<span class="hljs-string">"latent"</span> | |
| ).frames | |
| <span class="hljs-comment"># 3. Denoise the upscaled video with few steps to improve texture (optional, but recommended)</span> | |
| video = pipeline( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=upscaled_width, | |
| height=upscaled_height, | |
| num_frames=num_frames, | |
| denoise_strength=<span class="hljs-number">0.999</span>, <span class="hljs-comment"># Effectively, 4 inference steps out of 5</span> | |
| timesteps=[<span class="hljs-number">1000</span>, <span class="hljs-number">909</span>, <span class="hljs-number">725</span>, <span class="hljs-number">421</span>, <span class="hljs-number">0</span>], | |
| latents=upscaled_latents, | |
| decode_timestep=<span class="hljs-number">0.05</span>, | |
| decode_noise_scale=<span class="hljs-number">0.025</span>, | |
| image_cond_noise_scale=<span class="hljs-number">0.0</span>, | |
| guidance_scale=<span class="hljs-number">1.0</span>, | |
| guidance_rescale=<span class="hljs-number">0.7</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">0</span>), | |
| output_type=<span class="hljs-string">"pil"</span>, | |
| ).frames[<span class="hljs-number">0</span>] | |
| <span class="hljs-comment"># 4. Downscale the video to the expected resolution</span> | |
| video = [frame.resize((expected_width, expected_height)) <span class="hljs-keyword">for</span> frame <span class="hljs-keyword">in</span> video] | |
| export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">24</span>)`,wrap:!1}}),ye=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTFRYQ29uZGl0aW9uUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX3ZpZGVvJTJDJTIwbG9hZF9pbWFnZSUwQSUwQXBpcGVsaW5lJTIwJTNEJTIwTFRYQ29uZGl0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkxpZ2h0cmlja3MlMkZMVFgtVmlkZW8tMC45LjUlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTBBKSUwQSUwQXBpcGVsaW5lLmxvYWRfbG9yYV93ZWlnaHRzKCUyMkxpZ2h0cmlja3MlMkZMVFgtVmlkZW8tQ2FrZWlmeS1Mb1JBJTIyJTJDJTIwYWRhcHRlcl9uYW1lJTNEJTIyY2FrZWlmeSUyMiklMEFwaXBlbGluZS5zZXRfYWRhcHRlcnMoJTIyY2FrZWlmeSUyMiklMEElMEElMjMlMjB1c2UlMjAlMjJDQUtFSUZZJTIyJTIwdG8lMjB0cmlnZ2VyJTIwdGhlJTIwTG9SQSUwQXByb21wdCUyMCUzRCUyMCUyMkNBS0VJRlklMjBhJTIwcGVyc29uJTIwdXNpbmclMjBhJTIwa25pZmUlMjB0byUyMGN1dCUyMGElMjBjYWtlJTIwc2hhcGVkJTIwbGlrZSUyMGElMjBQaWthY2h1JTIwcGx1c2hpZSUyMiUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGTGlnaHRyaWNrcyUyRkxUWC1WaWRlby1DYWtlaWZ5LUxvUkElMkZyZXNvbHZlJTJGbWFpbiUyRmFzc2V0cyUyRmltYWdlcyUyRnBpa2FjaHUucG5nJTIyKSUwQSUwQXZpZGVvJTIwJTNEJTIwcGlwZWxpbmUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMHdpZHRoJTNENTc2JTJDJTBBJTIwJTIwJTIwJTIwaGVpZ2h0JTNENTc2JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2ZyYW1lcyUzRDE2MSUyQyUwQSUyMCUyMCUyMCUyMGRlY29kZV90aW1lc3RlcCUzRDAuMDMlMkMlMEElMjAlMjAlMjAlMjBkZWNvZGVfbm9pc2Vfc2NhbGUlM0QwLjAyNSUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q1MCUyQyUwQSkuZnJhbWVzJTVCMCU1RCUwQWV4cG9ydF90b192aWRlbyh2aWRlbyUyQyUyMCUyMm91dHB1dC5tcDQlMjIlMkMlMjBmcHMlM0QyNik=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXConditionPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video, load_image | |
| pipeline = LTXConditionPipeline.from_pretrained( | |
| <span class="hljs-string">"Lightricks/LTX-Video-0.9.5"</span>, torch_dtype=torch.bfloat16 | |
| ) | |
| pipeline.load_lora_weights(<span class="hljs-string">"Lightricks/LTX-Video-Cakeify-LoRA"</span>, adapter_name=<span class="hljs-string">"cakeify"</span>) | |
| pipeline.set_adapters(<span class="hljs-string">"cakeify"</span>) | |
| <span class="hljs-comment"># use "CAKEIFY" to trigger the LoRA</span> | |
| prompt = <span class="hljs-string">"CAKEIFY a person using a knife to cut a cake shaped like a Pikachu plushie"</span> | |
| image = load_image(<span class="hljs-string">"https://huggingface.co/Lightricks/LTX-Video-Cakeify-LoRA/resolve/main/assets/images/pikachu.png"</span>) | |
| video = pipeline( | |
| prompt=prompt, | |
| image=image, | |
| width=<span class="hljs-number">576</span>, | |
| height=<span class="hljs-number">576</span>, | |
| num_frames=<span class="hljs-number">161</span>, | |
| decode_timestep=<span class="hljs-number">0.03</span>, | |
| decode_noise_scale=<span class="hljs-number">0.025</span>, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| ).frames[<span class="hljs-number">0</span>] | |
| export_to_video(video, <span class="hljs-string">"output.mp4"</span>, fps=<span class="hljs-number">26</span>)`,wrap:!1}}),be=new z({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwZXhwb3J0X3RvX3ZpZGVvJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMExUWFBpcGVsaW5lJTJDJTIwQXV0b01vZGVsJTJDJTIwR0dVRlF1YW50aXphdGlvbkNvbmZpZyUwQSUwQXRyYW5zZm9ybWVyJTIwJTNEJTIwQXV0b01vZGVsLmZyb21fc2luZ2xlX2ZpbGUoJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmNpdHk5NiUyRkxUWC1WaWRlby1nZ3VmJTJGYmxvYiUyRm1haW4lMkZsdHgtdmlkZW8tMmItdjAuOS1RM19LX1MuZ2d1ZiUyMiUwQSUyMCUyMCUyMCUyMHF1YW50aXphdGlvbl9jb25maWclM0RHR1VGUXVhbnRpemF0aW9uQ29uZmlnKGNvbXB1dGVfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTBBKSUwQXBpcGVsaW5lJTIwJTNEJTIwTFRYUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkxpZ2h0cmlja3MlMkZMVFgtVmlkZW8lMjIlMkMlMEElMjAlMjAlMjAlMjB0cmFuc2Zvcm1lciUzRHRyYW5zZm9ybWVyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiUwQSk=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> export_to_video | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LTXPipeline, AutoModel, GGUFQuantizationConfig | |
| transformer = AutoModel.from_single_file( | |
| <span class="hljs-string">"https://huggingface.co/city96/LTX-Video-gguf/blob/main/ltx-video-2b-v0.9-Q3_K_S.gguf"</span> | |
| quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), | |
| torch_dtype=torch.bfloat16 | |
| ) | |
| pipeline = LTXPipeline.from_pretrained( | |
| <span class="hljs-string">"Lightricks/LTX-Video"</span>, | |
| transformer=transformer, | |
| torch_dtype=torch.bfloat16 | |
| )`,wrap:!1}}),we=new De({props:{title:"LTXPipeline",local:"diffusers.LTXPipeline",headingTag:"h2"}}),Je=new I({props:{name:"class diffusers.LTXPipeline",anchor:"diffusers.LTXPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLLTXVideo"},{name:"text_encoder",val:": T5EncoderModel"},{name:"tokenizer",val:": T5TokenizerFast"},{name:"transformer",val:": LTXVideoTransformer3DModel"}],parametersDescription:[{anchor:"diffusers.LTXPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12067/en/api/models/ltx_video_transformer3d#diffusers.LTXVideoTransformer3DModel">LTXVideoTransformer3DModel</a>) — | |
| Conditional Transformer architecture to denoise the encoded video latents.`,name:"transformer"},{anchor:"diffusers.LTXPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12067/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.LTXPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12067/en/api/models/autoencoderkl_ltx_video#diffusers.AutoencoderKLLTXVideo">AutoencoderKLLTXVideo</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LTXPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LTXPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.LTXPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx.py#L170"}}),Ue=new I({props:{name:"__call__",anchor:"diffusers.LTXPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"num_frames",val:": int = 161"},{name:"frame_rate",val:": int = 25"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 3"},{name:"guidance_rescale",val:": float = 0.0"},{name:"num_videos_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 128"}],parametersDescription:[{anchor:"diffusers.LTXPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.LTXPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, defaults to <code>512</code>) — | |
| The height in pixels of the generated image. This is set to 480 by default for the best results.`,name:"height"},{anchor:"diffusers.LTXPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, defaults to <code>704</code>) — | |
| The width in pixels of the generated image. This is set to 848 by default for the best results.`,name:"width"},{anchor:"diffusers.LTXPipeline.__call__.num_frames",description:`<strong>num_frames</strong> (<code>int</code>, defaults to <code>161</code>) — | |
| The number of video frames to generate`,name:"num_frames"},{anchor:"diffusers.LTXPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.LTXPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.LTXPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, defaults to <code>3 </code>) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.LTXPipeline.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Guidance rescale factor proposed by <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are | |
| Flawed</a> <code>guidance_scale</code> is defined as <code>φ</code> in equation 16. of | |
| <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are Flawed</a>. | |
| Guidance rescale factor should fix overexposure when using zero terminal SNR.`,name:"guidance_rescale"},{anchor:"diffusers.LTXPipeline.__call__.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of videos to generate per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.LTXPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.LTXPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for text embeddings.`,name:"prompt_attention_mask"},{anchor:"diffusers.LTXPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not | |
| provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.LTXPipeline.__call__.decode_timestep",description:`<strong>decode_timestep</strong> (<code>float</code>, defaults to <code>0.0</code>) — | |
| The timestep at which generated video is decoded.`,name:"decode_timestep"},{anchor:"diffusers.LTXPipeline.__call__.decode_noise_scale",description:`<strong>decode_noise_scale</strong> (<code>float</code>, defaults to <code>None</code>) — | |
| The interpolation factor between random noise and denoised latents at the decode timestep.`,name:"decode_noise_scale"},{anchor:"diffusers.LTXPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LTXPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.ltx.LTXPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.LTXPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.LTXPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LTXPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.LTXPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>128 </code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx.py#L535",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <code>~pipelines.ltx.LTXPipelineOutput</code> is returned, otherwise a <code>tuple</code> is | |
| returned where the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.ltx.LTXPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),A=new ps({props:{anchor:"diffusers.LTXPipeline.__call__.example",$$slots:{default:[ao]},$$scope:{ctx:G}}}),je=new I({props:{name:"encode_prompt",anchor:"diffusers.LTXPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_videos_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 128"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"dtype",val:": typing.Optional[torch.dtype] = None"}],parametersDescription:[{anchor:"diffusers.LTXPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.LTXPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LTXPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to use classifier free guidance or not.`,name:"do_classifier_free_guidance"},{anchor:"diffusers.LTXPipeline.encode_prompt.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| Number of videos that should be generated per prompt. torch device to place the resulting embeddings on`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device`,name:"device"},{anchor:"diffusers.LTXPipeline.encode_prompt.dtype",description:`<strong>dtype</strong> — (<code>torch.dtype</code>, <em>optional</em>): | |
| torch dtype`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx.py#L283"}}),ve=new De({props:{title:"LTXImageToVideoPipeline",local:"diffusers.LTXImageToVideoPipeline",headingTag:"h2"}}),Ze=new I({props:{name:"class diffusers.LTXImageToVideoPipeline",anchor:"diffusers.LTXImageToVideoPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLLTXVideo"},{name:"text_encoder",val:": T5EncoderModel"},{name:"tokenizer",val:": T5TokenizerFast"},{name:"transformer",val:": LTXVideoTransformer3DModel"}],parametersDescription:[{anchor:"diffusers.LTXImageToVideoPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12067/en/api/models/ltx_video_transformer3d#diffusers.LTXVideoTransformer3DModel">LTXVideoTransformer3DModel</a>) — | |
| Conditional Transformer architecture to denoise the encoded video latents.`,name:"transformer"},{anchor:"diffusers.LTXImageToVideoPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12067/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.LTXImageToVideoPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12067/en/api/models/autoencoderkl_ltx_video#diffusers.AutoencoderKLLTXVideo">AutoencoderKLLTXVideo</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LTXImageToVideoPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LTXImageToVideoPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.LTXImageToVideoPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py#L189"}}),Ie=new I({props:{name:"__call__",anchor:"diffusers.LTXImageToVideoPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"num_frames",val:": int = 161"},{name:"frame_rate",val:": int = 25"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 3"},{name:"guidance_rescale",val:": float = 0.0"},{name:"num_videos_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 128"}],parametersDescription:[{anchor:"diffusers.LTXImageToVideoPipeline.__call__.image",description:`<strong>image</strong> (<code>PipelineImageInput</code>) — | |
| The input image to condition the generation on. Must be an image, a list of images or a <code>torch.Tensor</code>.`,name:"image"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, defaults to <code>512</code>) — | |
| The height in pixels of the generated image. This is set to 480 by default for the best results.`,name:"height"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, defaults to <code>704</code>) — | |
| The width in pixels of the generated image. This is set to 848 by default for the best results.`,name:"width"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.num_frames",description:`<strong>num_frames</strong> (<code>int</code>, defaults to <code>161</code>) — | |
| The number of video frames to generate`,name:"num_frames"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, defaults to <code>3 </code>) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Guidance rescale factor proposed by <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are | |
| Flawed</a> <code>guidance_scale</code> is defined as <code>φ</code> in equation 16. of | |
| <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are Flawed</a>. | |
| Guidance rescale factor should fix overexposure when using zero terminal SNR.`,name:"guidance_rescale"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of videos to generate per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for text embeddings.`,name:"prompt_attention_mask"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not | |
| provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.decode_timestep",description:`<strong>decode_timestep</strong> (<code>float</code>, defaults to <code>0.0</code>) — | |
| The timestep at which generated video is decoded.`,name:"decode_timestep"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.decode_noise_scale",description:`<strong>decode_noise_scale</strong> (<code>float</code>, defaults to <code>None</code>) — | |
| The interpolation factor between random noise and denoised latents at the decode timestep.`,name:"decode_noise_scale"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.ltx.LTXPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>128 </code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py#L596",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <code>~pipelines.ltx.LTXPipelineOutput</code> is returned, otherwise a <code>tuple</code> is | |
| returned where the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.ltx.LTXPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),q=new ps({props:{anchor:"diffusers.LTXImageToVideoPipeline.__call__.example",$$slots:{default:[lo]},$$scope:{ctx:G}}}),Ge=new I({props:{name:"encode_prompt",anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_videos_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 128"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"dtype",val:": typing.Optional[torch.dtype] = None"}],parametersDescription:[{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to use classifier free guidance or not.`,name:"do_classifier_free_guidance"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| Number of videos that should be generated per prompt. torch device to place the resulting embeddings on`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device`,name:"device"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.dtype",description:`<strong>dtype</strong> — (<code>torch.dtype</code>, <em>optional</em>): | |
| torch dtype`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py#L306"}}),Xe=new De({props:{title:"LTXConditionPipeline",local:"diffusers.LTXConditionPipeline",headingTag:"h2"}}),Be=new I({props:{name:"class diffusers.LTXConditionPipeline",anchor:"diffusers.LTXConditionPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLLTXVideo"},{name:"text_encoder",val:": T5EncoderModel"},{name:"tokenizer",val:": T5TokenizerFast"},{name:"transformer",val:": LTXVideoTransformer3DModel"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12067/en/api/models/ltx_video_transformer3d#diffusers.LTXVideoTransformer3DModel">LTXVideoTransformer3DModel</a>) — | |
| Conditional Transformer architecture to denoise the encoded video latents.`,name:"transformer"},{anchor:"diffusers.LTXConditionPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12067/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.LTXConditionPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12067/en/api/models/autoencoderkl_ltx_video#diffusers.AutoencoderKLLTXVideo">AutoencoderKLLTXVideo</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LTXConditionPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LTXConditionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.LTXConditionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L252"}}),xe=new I({props:{name:"__call__",anchor:"diffusers.LTXConditionPipeline.__call__",parameters:[{name:"conditions",val:": typing.Union[diffusers.pipelines.ltx.pipeline_ltx_condition.LTXVideoCondition, typing.List[diffusers.pipelines.ltx.pipeline_ltx_condition.LTXVideoCondition]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], typing.List[typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]]] = None"},{name:"video",val:": typing.List[typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]] = None"},{name:"frame_index",val:": typing.Union[int, typing.List[int]] = 0"},{name:"strength",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"denoise_strength",val:": float = 1.0"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"num_frames",val:": int = 161"},{name:"frame_rate",val:": int = 25"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 3"},{name:"guidance_rescale",val:": float = 0.0"},{name:"image_cond_noise_scale",val:": float = 0.15"},{name:"num_videos_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 256"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.__call__.conditions",description:`<strong>conditions</strong> (<code>List[LTXVideoCondition], *optional*</code>) — | |
| The list of frame-conditioning items for the video generation.If not provided, conditions will be | |
| created using <code>image</code>, <code>video</code>, <code>frame_index</code> and <code>strength</code>.`,name:"conditions"},{anchor:"diffusers.LTXConditionPipeline.__call__.image",description:`<strong>image</strong> (<code>PipelineImageInput</code> or <code>List[PipelineImageInput]</code>, <em>optional</em>) — | |
| The image or images to condition the video generation. If not provided, one has to pass <code>video</code> or | |
| <code>conditions</code>.`,name:"image"},{anchor:"diffusers.LTXConditionPipeline.__call__.video",description:`<strong>video</strong> (<code>List[PipelineImageInput]</code>, <em>optional</em>) — | |
| The video to condition the video generation. If not provided, one has to pass <code>image</code> or <code>conditions</code>.`,name:"video"},{anchor:"diffusers.LTXConditionPipeline.__call__.frame_index",description:`<strong>frame_index</strong> (<code>int</code> or <code>List[int]</code>, <em>optional</em>) — | |
| The frame index or frame indices at which the image or video will conditionally effect the video | |
| generation. If not provided, one has to pass <code>conditions</code>.`,name:"frame_index"},{anchor:"diffusers.LTXConditionPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>) — | |
| The strength or strengths of the conditioning effect. If not provided, one has to pass <code>conditions</code>.`,name:"strength"},{anchor:"diffusers.LTXConditionPipeline.__call__.denoise_strength",description:`<strong>denoise_strength</strong> (<code>float</code>, defaults to <code>1.0</code>) — | |
| The strength of the noise added to the latents for editing. Higher strength leads to more noise added | |
| to the latents, therefore leading to more differences between original video and generated video. This | |
| is useful for video-to-video editing.`,name:"denoise_strength"},{anchor:"diffusers.LTXConditionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.LTXConditionPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, defaults to <code>512</code>) — | |
| The height in pixels of the generated image. This is set to 480 by default for the best results.`,name:"height"},{anchor:"diffusers.LTXConditionPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, defaults to <code>704</code>) — | |
| The width in pixels of the generated image. This is set to 848 by default for the best results.`,name:"width"},{anchor:"diffusers.LTXConditionPipeline.__call__.num_frames",description:`<strong>num_frames</strong> (<code>int</code>, defaults to <code>161</code>) — | |
| The number of video frames to generate`,name:"num_frames"},{anchor:"diffusers.LTXConditionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.LTXConditionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.LTXConditionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, defaults to <code>3 </code>) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.LTXConditionPipeline.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Guidance rescale factor proposed by <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are | |
| Flawed</a> <code>guidance_scale</code> is defined as <code>φ</code> in equation 16. of | |
| <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are Flawed</a>. | |
| Guidance rescale factor should fix overexposure when using zero terminal SNR.`,name:"guidance_rescale"},{anchor:"diffusers.LTXConditionPipeline.__call__.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of videos to generate per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXConditionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.LTXConditionPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.LTXConditionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for text embeddings.`,name:"prompt_attention_mask"},{anchor:"diffusers.LTXConditionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not | |
| provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.LTXConditionPipeline.__call__.decode_timestep",description:`<strong>decode_timestep</strong> (<code>float</code>, defaults to <code>0.0</code>) — | |
| The timestep at which generated video is decoded.`,name:"decode_timestep"},{anchor:"diffusers.LTXConditionPipeline.__call__.decode_noise_scale",description:`<strong>decode_noise_scale</strong> (<code>float</code>, defaults to <code>None</code>) — | |
| The interpolation factor between random noise and denoised latents at the decode timestep.`,name:"decode_noise_scale"},{anchor:"diffusers.LTXConditionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LTXConditionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.ltx.LTXPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.LTXConditionPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.LTXConditionPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LTXConditionPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.LTXConditionPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>128 </code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L848",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <code>~pipelines.ltx.LTXPipelineOutput</code> is returned, otherwise a <code>tuple</code> is | |
| returned where the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.ltx.LTXPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),K=new ps({props:{anchor:"diffusers.LTXConditionPipeline.__call__.example",$$slots:{default:[io]},$$scope:{ctx:G}}}),We=new I({props:{name:"add_noise_to_image_conditioning_latents",anchor:"diffusers.LTXConditionPipeline.add_noise_to_image_conditioning_latents",parameters:[{name:"t",val:": float"},{name:"init_latents",val:": Tensor"},{name:"latents",val:": Tensor"},{name:"noise_scale",val:": float"},{name:"conditioning_mask",val:": Tensor"},{name:"generator",val:""},{name:"eps",val:" = 1e-06"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L646"}}),ke=new I({props:{name:"encode_prompt",anchor:"diffusers.LTXConditionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_videos_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 256"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"dtype",val:": typing.Optional[torch.dtype] = None"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to use classifier free guidance or not.`,name:"do_classifier_free_guidance"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| Number of videos that should be generated per prompt. torch device to place the resulting embeddings on`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device`,name:"device"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.dtype",description:`<strong>dtype</strong> — (<code>torch.dtype</code>, <em>optional</em>): | |
| torch dtype`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L369"}}),Ve=new I({props:{name:"trim_conditioning_sequence",anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence",parameters:[{name:"start_frame",val:": int"},{name:"sequence_num_frames",val:": int"},{name:"target_num_frames",val:": int"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence.start_frame",description:"<strong>start_frame</strong> (int) — The target frame number of the first frame in the sequence.",name:"start_frame"},{anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence.sequence_num_frames",description:"<strong>sequence_num_frames</strong> (int) — The number of frames in the sequence.",name:"sequence_num_frames"},{anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence.target_num_frames",description:"<strong>target_num_frames</strong> (int) — The target number of frames in the generated video.",name:"target_num_frames"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L629",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>updated sequence length</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>int</p> | |
| `}}),Ce=new De({props:{title:"LTXLatentUpsamplePipeline",local:"diffusers.LTXLatentUpsamplePipeline",headingTag:"h2"}}),Re=new I({props:{name:"class diffusers.LTXLatentUpsamplePipeline",anchor:"diffusers.LTXLatentUpsamplePipeline",parameters:[{name:"vae",val:": AutoencoderKLLTXVideo"},{name:"latent_upsampler",val:": LTXLatentUpsamplerModel"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L46"}}),Le=new I({props:{name:"__call__",anchor:"diffusers.LTXLatentUpsamplePipeline.__call__",parameters:[{name:"video",val:": typing.Optional[typing.List[typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]]] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"adain_factor",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L184"}}),Ne=new I({props:{name:"adain_filter_latent",anchor:"diffusers.LTXLatentUpsamplePipeline.adain_filter_latent",parameters:[{name:"latents",val:": Tensor"},{name:"reference_latents",val:": Tensor"},{name:"factor",val:": float = 1.0"}],parametersDescription:[{anchor:"diffusers.LTXLatentUpsamplePipeline.adain_filter_latent.latent",description:`<strong>latent</strong> (<code>torch.Tensor</code>) — | |
| Input latents to normalize`,name:"latent"},{anchor:"diffusers.LTXLatentUpsamplePipeline.adain_filter_latent.reference_latents",description:`<strong>reference_latents</strong> (<code>torch.Tensor</code>) — | |
| The reference latents providing style statistics.`,name:"reference_latents"},{anchor:"diffusers.LTXLatentUpsamplePipeline.adain_filter_latent.factor",description:`<strong>factor</strong> (<code>float</code>) — | |
| Blending factor between original and transformed latent. Range: -10.0 to 10.0, Default: 1.0`,name:"factor"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L96",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>The transformed latent tensor</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>torch.Tensor</p> | |
| `}}),Ye=new I({props:{name:"disable_vae_slicing",anchor:"diffusers.LTXLatentUpsamplePipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L153"}}),Fe=new I({props:{name:"disable_vae_tiling",anchor:"diffusers.LTXLatentUpsamplePipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L168"}}),He=new I({props:{name:"enable_vae_slicing",anchor:"diffusers.LTXLatentUpsamplePipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L146"}}),Ee=new I({props:{name:"enable_vae_tiling",anchor:"diffusers.LTXLatentUpsamplePipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L160"}}),Qe=new De({props:{title:"LTXPipelineOutput",local:"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput",headingTag:"h2"}}),$e=new I({props:{name:"class diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput",anchor:"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput",parameters:[{name:"frames",val:": Tensor"}],parametersDescription:[{anchor:"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput.frames",description:`<strong>frames</strong> (<code>torch.Tensor</code>, <code>np.ndarray</code>, or List[List[PIL.Image.Image]]) — | |
| List of video outputs - It can be a nested list of length <code>batch_size,</code> with each sub-list containing | |
| denoised PIL image sequences of length <code>num_frames.</code> It can also be a NumPy array or Torch tensor of shape | |
| <code>(batch_size, num_frames, channels, height, width)</code>.`,name:"frames"}],source:"https://github.com/huggingface/diffusers/blob/vr_12067/src/diffusers/pipelines/ltx/pipeline_output.py#L8"}}),ze=new Ks({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/ltx_video.md"}}),{c(){r=l("meta"),J=s(),u=l("p"),c=s(),p=l("div"),p.innerHTML=a,b=s(),h(X.$$.fragment),U=s(),C=l("p"),C.innerHTML=cs,Wt=s(),re=l("p"),re.innerHTML=ms,kt=s(),h(S.$$.fragment),Vt=s(),de=l("p"),de.textContent=us,Ct=s(),h(P.$$.fragment),Rt=s(),h(pe.$$.fragment),Lt=s(),W=l("ul"),qe=l("li"),qe.innerHTML=hs,hn=s(),ce=l("li"),Oe=l("p"),Oe.textContent=gs,gn=s(),me=l("details"),Ke=l("summary"),Ke.textContent=fs,fn=s(),h(ue.$$.fragment),Mn=s(),E=l("li"),et=l("p"),et.innerHTML=Ms,yn=s(),tt=l("ul"),tt.innerHTML=ys,_n=s(),he=l("details"),nt=l("summary"),nt.textContent=_s,Tn=s(),h(ge.$$.fragment),bn=s(),fe=l("li"),st=l("p"),st.innerHTML=Ts,wn=s(),Me=l("details"),ot=l("summary"),ot.textContent=bs,Jn=s(),h(ye.$$.fragment),Un=s(),_e=l("li"),at=l("p"),at.innerHTML=ws,jn=s(),Te=l("details"),lt=l("summary"),lt.textContent=Js,vn=s(),h(be.$$.fragment),Nt=s(),h(we.$$.fragment),Yt=s(),B=l("div"),h(Je.$$.fragment),Zn=s(),it=l("p"),it.textContent=Us,In=s(),rt=l("p"),rt.innerHTML=js,Gn=s(),L=l("div"),h(Ue.$$.fragment),Xn=s(),dt=l("p"),dt.textContent=vs,Bn=s(),h(A.$$.fragment),xn=s(),D=l("div"),h(je.$$.fragment),Wn=s(),pt=l("p"),pt.textContent=Zs,Ft=s(),h(ve.$$.fragment),Ht=s(),x=l("div"),h(Ze.$$.fragment),kn=s(),ct=l("p"),ct.textContent=Is,Vn=s(),mt=l("p"),mt.innerHTML=Gs,Cn=s(),N=l("div"),h(Ie.$$.fragment),Rn=s(),ut=l("p"),ut.textContent=Xs,Ln=s(),h(q.$$.fragment),Nn=s(),O=l("div"),h(Ge.$$.fragment),Yn=s(),ht=l("p"),ht.textContent=Bs,Et=s(),h(Xe.$$.fragment),Qt=s(),v=l("div"),h(Be.$$.fragment),Fn=s(),gt=l("p"),gt.textContent=xs,Hn=s(),ft=l("p"),ft.innerHTML=Ws,En=s(),Y=l("div"),h(xe.$$.fragment),Qn=s(),Mt=l("p"),Mt.textContent=ks,$n=s(),h(K.$$.fragment),zn=s(),ee=l("div"),h(We.$$.fragment),Sn=s(),yt=l("p"),yt.textContent=Vs,Pn=s(),te=l("div"),h(ke.$$.fragment),An=s(),_t=l("p"),_t.textContent=Cs,Dn=s(),ne=l("div"),h(Ve.$$.fragment),qn=s(),Tt=l("p"),Tt.textContent=Rs,$t=s(),h(Ce.$$.fragment),zt=s(),Z=l("div"),h(Re.$$.fragment),On=s(),bt=l("div"),h(Le.$$.fragment),Kn=s(),se=l("div"),h(Ne.$$.fragment),es=s(),wt=l("p"),wt.textContent=Ls,ts=s(),oe=l("div"),h(Ye.$$.fragment),ns=s(),Jt=l("p"),Jt.innerHTML=Ns,ss=s(),ae=l("div"),h(Fe.$$.fragment),os=s(),Ut=l("p"),Ut.innerHTML=Ys,as=s(),le=l("div"),h(He.$$.fragment),ls=s(),jt=l("p"),jt.textContent=Fs,is=s(),ie=l("div"),h(Ee.$$.fragment),rs=s(),vt=l("p"),vt.textContent=Hs,St=s(),h(Qe.$$.fragment),Pt=s(),Q=l("div"),h($e.$$.fragment),ds=s(),Zt=l("p"),Zt.textContent=Es,At=s(),h(ze.$$.fragment),Dt=s(),xt=l("p"),this.h()},l(e){const d=Ds("svelte-u9bgzb",document.head);r=i(d,"META",{name:!0,content:!0}),d.forEach(n),J=o(e),u=i(e,"P",{}),w(u).forEach(n),c=o(e),p=i(e,"DIV",{style:!0,"data-svelte-h":!0}),T(p)!=="svelte-py9lmv"&&(p.innerHTML=a),b=o(e),g(X.$$.fragment,e),U=o(e),C=i(e,"P",{"data-svelte-h":!0}),T(C)!=="svelte-1mgfd6u"&&(C.innerHTML=cs),Wt=o(e),re=i(e,"P",{"data-svelte-h":!0}),T(re)!=="svelte-1134kk7"&&(re.innerHTML=ms),kt=o(e),g(S.$$.fragment,e),Vt=o(e),de=i(e,"P",{"data-svelte-h":!0}),T(de)!=="svelte-pzhop3"&&(de.textContent=us),Ct=o(e),g(P.$$.fragment,e),Rt=o(e),g(pe.$$.fragment,e),Lt=o(e),W=i(e,"UL",{});var R=w(W);qe=i(R,"LI",{"data-svelte-h":!0}),T(qe)!=="svelte-11s63gz"&&(qe.innerHTML=hs),hn=o(R),ce=i(R,"LI",{});var Se=w(ce);Oe=i(Se,"P",{"data-svelte-h":!0}),T(Oe)!=="svelte-nohx4d"&&(Oe.textContent=gs),gn=o(Se),me=i(Se,"DETAILS",{});var Pe=w(me);Ke=i(Pe,"SUMMARY",{"data-svelte-h":!0}),T(Ke)!=="svelte-1m0l1gk"&&(Ke.textContent=fs),fn=o(Pe),g(ue.$$.fragment,Pe),Pe.forEach(n),Se.forEach(n),Mn=o(R),E=i(R,"LI",{});var $=w(E);et=i($,"P",{"data-svelte-h":!0}),T(et)!=="svelte-12jhl43"&&(et.innerHTML=Ms),yn=o($),tt=i($,"UL",{"data-svelte-h":!0}),T(tt)!=="svelte-lvdcu8"&&(tt.innerHTML=ys),_n=o($),he=i($,"DETAILS",{});var Ae=w(he);nt=i(Ae,"SUMMARY",{"data-svelte-h":!0}),T(nt)!=="svelte-1m0l1gk"&&(nt.textContent=_s),Tn=o(Ae),g(ge.$$.fragment,Ae),Ae.forEach(n),$.forEach(n),bn=o(R),fe=i(R,"LI",{});var Ot=w(fe);st=i(Ot,"P",{"data-svelte-h":!0}),T(st)!=="svelte-wbgs91"&&(st.innerHTML=Ts),wn=o(Ot),Me=i(Ot,"DETAILS",{});var Kt=w(Me);ot=i(Kt,"SUMMARY",{"data-svelte-h":!0}),T(ot)!=="svelte-1m0l1gk"&&(ot.textContent=bs),Jn=o(Kt),g(ye.$$.fragment,Kt),Kt.forEach(n),Ot.forEach(n),Un=o(R),_e=i(R,"LI",{});var en=w(_e);at=i(en,"P",{"data-svelte-h":!0}),T(at)!=="svelte-28t107"&&(at.innerHTML=ws),jn=o(en),Te=i(en,"DETAILS",{});var tn=w(Te);lt=i(tn,"SUMMARY",{"data-svelte-h":!0}),T(lt)!=="svelte-1m0l1gk"&&(lt.textContent=Js),vn=o(tn),g(be.$$.fragment,tn),tn.forEach(n),en.forEach(n),R.forEach(n),Nt=o(e),g(we.$$.fragment,e),Yt=o(e),B=i(e,"DIV",{class:!0});var F=w(B);g(Je.$$.fragment,F),Zn=o(F),it=i(F,"P",{"data-svelte-h":!0}),T(it)!=="svelte-19ipoo4"&&(it.textContent=Us),In=o(F),rt=i(F,"P",{"data-svelte-h":!0}),T(rt)!=="svelte-1sr6eg8"&&(rt.innerHTML=js),Gn=o(F),L=i(F,"DIV",{class:!0});var It=w(L);g(Ue.$$.fragment,It),Xn=o(It),dt=i(It,"P",{"data-svelte-h":!0}),T(dt)!=="svelte-v78lg8"&&(dt.textContent=vs),Bn=o(It),g(A.$$.fragment,It),It.forEach(n),xn=o(F),D=i(F,"DIV",{class:!0});var nn=w(D);g(je.$$.fragment,nn),Wn=o(nn),pt=i(nn,"P",{"data-svelte-h":!0}),T(pt)!=="svelte-16q0ax1"&&(pt.textContent=Zs),nn.forEach(n),F.forEach(n),Ft=o(e),g(ve.$$.fragment,e),Ht=o(e),x=i(e,"DIV",{class:!0});var H=w(x);g(Ze.$$.fragment,H),kn=o(H),ct=i(H,"P",{"data-svelte-h":!0}),T(ct)!=="svelte-10tczlw"&&(ct.textContent=Is),Vn=o(H),mt=i(H,"P",{"data-svelte-h":!0}),T(mt)!=="svelte-1sr6eg8"&&(mt.innerHTML=Gs),Cn=o(H),N=i(H,"DIV",{class:!0});var Gt=w(N);g(Ie.$$.fragment,Gt),Rn=o(Gt),ut=i(Gt,"P",{"data-svelte-h":!0}),T(ut)!=="svelte-v78lg8"&&(ut.textContent=Xs),Ln=o(Gt),g(q.$$.fragment,Gt),Gt.forEach(n),Nn=o(H),O=i(H,"DIV",{class:!0});var sn=w(O);g(Ge.$$.fragment,sn),Yn=o(sn),ht=i(sn,"P",{"data-svelte-h":!0}),T(ht)!=="svelte-16q0ax1"&&(ht.textContent=Bs),sn.forEach(n),H.forEach(n),Et=o(e),g(Xe.$$.fragment,e),Qt=o(e),v=i(e,"DIV",{class:!0});var k=w(v);g(Be.$$.fragment,k),Fn=o(k),gt=i(k,"P",{"data-svelte-h":!0}),T(gt)!=="svelte-4vzu4m"&&(gt.textContent=xs),Hn=o(k),ft=i(k,"P",{"data-svelte-h":!0}),T(ft)!=="svelte-1sr6eg8"&&(ft.innerHTML=Ws),En=o(k),Y=i(k,"DIV",{class:!0});var Xt=w(Y);g(xe.$$.fragment,Xt),Qn=o(Xt),Mt=i(Xt,"P",{"data-svelte-h":!0}),T(Mt)!=="svelte-v78lg8"&&(Mt.textContent=ks),$n=o(Xt),g(K.$$.fragment,Xt),Xt.forEach(n),zn=o(k),ee=i(k,"DIV",{class:!0});var on=w(ee);g(We.$$.fragment,on),Sn=o(on),yt=i(on,"P",{"data-svelte-h":!0}),T(yt)!=="svelte-9ak1um"&&(yt.textContent=Vs),on.forEach(n),Pn=o(k),te=i(k,"DIV",{class:!0});var an=w(te);g(ke.$$.fragment,an),An=o(an),_t=i(an,"P",{"data-svelte-h":!0}),T(_t)!=="svelte-16q0ax1"&&(_t.textContent=Cs),an.forEach(n),Dn=o(k),ne=i(k,"DIV",{class:!0});var ln=w(ne);g(Ve.$$.fragment,ln),qn=o(ln),Tt=i(ln,"P",{"data-svelte-h":!0}),T(Tt)!=="svelte-1eod455"&&(Tt.textContent=Rs),ln.forEach(n),k.forEach(n),$t=o(e),g(Ce.$$.fragment,e),zt=o(e),Z=i(e,"DIV",{class:!0});var V=w(Z);g(Re.$$.fragment,V),On=o(V),bt=i(V,"DIV",{class:!0});var Qs=w(bt);g(Le.$$.fragment,Qs),Qs.forEach(n),Kn=o(V),se=i(V,"DIV",{class:!0});var rn=w(se);g(Ne.$$.fragment,rn),es=o(rn),wt=i(rn,"P",{"data-svelte-h":!0}),T(wt)!=="svelte-tr32vd"&&(wt.textContent=Ls),rn.forEach(n),ts=o(V),oe=i(V,"DIV",{class:!0});var dn=w(oe);g(Ye.$$.fragment,dn),ns=o(dn),Jt=i(dn,"P",{"data-svelte-h":!0}),T(Jt)!=="svelte-1s3c06i"&&(Jt.innerHTML=Ns),dn.forEach(n),ss=o(V),ae=i(V,"DIV",{class:!0});var pn=w(ae);g(Fe.$$.fragment,pn),os=o(pn),Ut=i(pn,"P",{"data-svelte-h":!0}),T(Ut)!=="svelte-pkn4ui"&&(Ut.innerHTML=Ys),pn.forEach(n),as=o(V),le=i(V,"DIV",{class:!0});var cn=w(le);g(He.$$.fragment,cn),ls=o(cn),jt=i(cn,"P",{"data-svelte-h":!0}),T(jt)!=="svelte-14bnrb6"&&(jt.textContent=Fs),cn.forEach(n),is=o(V),ie=i(V,"DIV",{class:!0});var mn=w(ie);g(Ee.$$.fragment,mn),rs=o(mn),vt=i(mn,"P",{"data-svelte-h":!0}),T(vt)!=="svelte-1xwrf7t"&&(vt.textContent=Hs),mn.forEach(n),V.forEach(n),St=o(e),g(Qe.$$.fragment,e),Pt=o(e),Q=i(e,"DIV",{class:!0});var un=w(Q);g($e.$$.fragment,un),ds=o(un),Zt=i(un,"P",{"data-svelte-h":!0}),T(Zt)!=="svelte-ia4jjd"&&(Zt.textContent=Es),un.forEach(n),At=o(e),g(ze.$$.fragment,e),Dt=o(e),xt=i(e,"P",{}),w(xt).forEach(n),this.h()},h(){j(r,"name","hf:doc:metadata"),j(r,"content",po),qs(p,"float","right"),j(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(bt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),j(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,d){t(document.head,r),m(e,J,d),m(e,u,d),m(e,c,d),m(e,p,d),m(e,b,d),f(X,e,d),m(e,U,d),m(e,C,d),m(e,Wt,d),m(e,re,d),m(e,kt,d),f(S,e,d),m(e,Vt,d),m(e,de,d),m(e,Ct,d),f(P,e,d),m(e,Rt,d),f(pe,e,d),m(e,Lt,d),m(e,W,d),t(W,qe),t(W,hn),t(W,ce),t(ce,Oe),t(ce,gn),t(ce,me),t(me,Ke),t(me,fn),f(ue,me,null),t(W,Mn),t(W,E),t(E,et),t(E,yn),t(E,tt),t(E,_n),t(E,he),t(he,nt),t(he,Tn),f(ge,he,null),t(W,bn),t(W,fe),t(fe,st),t(fe,wn),t(fe,Me),t(Me,ot),t(Me,Jn),f(ye,Me,null),t(W,Un),t(W,_e),t(_e,at),t(_e,jn),t(_e,Te),t(Te,lt),t(Te,vn),f(be,Te,null),m(e,Nt,d),f(we,e,d),m(e,Yt,d),m(e,B,d),f(Je,B,null),t(B,Zn),t(B,it),t(B,In),t(B,rt),t(B,Gn),t(B,L),f(Ue,L,null),t(L,Xn),t(L,dt),t(L,Bn),f(A,L,null),t(B,xn),t(B,D),f(je,D,null),t(D,Wn),t(D,pt),m(e,Ft,d),f(ve,e,d),m(e,Ht,d),m(e,x,d),f(Ze,x,null),t(x,kn),t(x,ct),t(x,Vn),t(x,mt),t(x,Cn),t(x,N),f(Ie,N,null),t(N,Rn),t(N,ut),t(N,Ln),f(q,N,null),t(x,Nn),t(x,O),f(Ge,O,null),t(O,Yn),t(O,ht),m(e,Et,d),f(Xe,e,d),m(e,Qt,d),m(e,v,d),f(Be,v,null),t(v,Fn),t(v,gt),t(v,Hn),t(v,ft),t(v,En),t(v,Y),f(xe,Y,null),t(Y,Qn),t(Y,Mt),t(Y,$n),f(K,Y,null),t(v,zn),t(v,ee),f(We,ee,null),t(ee,Sn),t(ee,yt),t(v,Pn),t(v,te),f(ke,te,null),t(te,An),t(te,_t),t(v,Dn),t(v,ne),f(Ve,ne,null),t(ne,qn),t(ne,Tt),m(e,$t,d),f(Ce,e,d),m(e,zt,d),m(e,Z,d),f(Re,Z,null),t(Z,On),t(Z,bt),f(Le,bt,null),t(Z,Kn),t(Z,se),f(Ne,se,null),t(se,es),t(se,wt),t(Z,ts),t(Z,oe),f(Ye,oe,null),t(oe,ns),t(oe,Jt),t(Z,ss),t(Z,ae),f(Fe,ae,null),t(ae,os),t(ae,Ut),t(Z,as),t(Z,le),f(He,le,null),t(le,ls),t(le,jt),t(Z,is),t(Z,ie),f(Ee,ie,null),t(ie,rs),t(ie,vt),m(e,St,d),f(Qe,e,d),m(e,Pt,d),m(e,Q,d),f($e,Q,null),t(Q,ds),t(Q,Zt),m(e,At,d),f(ze,e,d),m(e,Dt,d),m(e,xt,d),qt=!0},p(e,[d]){const R={};d&2&&(R.$$scope={dirty:d,ctx:e}),S.$set(R);const Se={};d&2&&(Se.$$scope={dirty:d,ctx:e}),P.$set(Se);const Pe={};d&2&&(Pe.$$scope={dirty:d,ctx:e}),A.$set(Pe);const $={};d&2&&($.$$scope={dirty:d,ctx:e}),q.$set($);const Ae={};d&2&&(Ae.$$scope={dirty:d,ctx:e}),K.$set(Ae)},i(e){qt||(M(X.$$.fragment,e),M(S.$$.fragment,e),M(P.$$.fragment,e),M(pe.$$.fragment,e),M(ue.$$.fragment,e),M(ge.$$.fragment,e),M(ye.$$.fragment,e),M(be.$$.fragment,e),M(we.$$.fragment,e),M(Je.$$.fragment,e),M(Ue.$$.fragment,e),M(A.$$.fragment,e),M(je.$$.fragment,e),M(ve.$$.fragment,e),M(Ze.$$.fragment,e),M(Ie.$$.fragment,e),M(q.$$.fragment,e),M(Ge.$$.fragment,e),M(Xe.$$.fragment,e),M(Be.$$.fragment,e),M(xe.$$.fragment,e),M(K.$$.fragment,e),M(We.$$.fragment,e),M(ke.$$.fragment,e),M(Ve.$$.fragment,e),M(Ce.$$.fragment,e),M(Re.$$.fragment,e),M(Le.$$.fragment,e),M(Ne.$$.fragment,e),M(Ye.$$.fragment,e),M(Fe.$$.fragment,e),M(He.$$.fragment,e),M(Ee.$$.fragment,e),M(Qe.$$.fragment,e),M($e.$$.fragment,e),M(ze.$$.fragment,e),qt=!0)},o(e){y(X.$$.fragment,e),y(S.$$.fragment,e),y(P.$$.fragment,e),y(pe.$$.fragment,e),y(ue.$$.fragment,e),y(ge.$$.fragment,e),y(ye.$$.fragment,e),y(be.$$.fragment,e),y(we.$$.fragment,e),y(Je.$$.fragment,e),y(Ue.$$.fragment,e),y(A.$$.fragment,e),y(je.$$.fragment,e),y(ve.$$.fragment,e),y(Ze.$$.fragment,e),y(Ie.$$.fragment,e),y(q.$$.fragment,e),y(Ge.$$.fragment,e),y(Xe.$$.fragment,e),y(Be.$$.fragment,e),y(xe.$$.fragment,e),y(K.$$.fragment,e),y(We.$$.fragment,e),y(ke.$$.fragment,e),y(Ve.$$.fragment,e),y(Ce.$$.fragment,e),y(Re.$$.fragment,e),y(Le.$$.fragment,e),y(Ne.$$.fragment,e),y(Ye.$$.fragment,e),y(Fe.$$.fragment,e),y(He.$$.fragment,e),y(Ee.$$.fragment,e),y(Qe.$$.fragment,e),y($e.$$.fragment,e),y(ze.$$.fragment,e),qt=!1},d(e){e&&(n(J),n(u),n(c),n(p),n(b),n(U),n(C),n(Wt),n(re),n(kt),n(Vt),n(de),n(Ct),n(Rt),n(Lt),n(W),n(Nt),n(Yt),n(B),n(Ft),n(Ht),n(x),n(Et),n(Qt),n(v),n($t),n(zt),n(Z),n(St),n(Pt),n(Q),n(At),n(Dt),n(xt)),n(r),_(X,e),_(S,e),_(P,e),_(pe,e),_(ue),_(ge),_(ye),_(be),_(we,e),_(Je),_(Ue),_(A),_(je),_(ve,e),_(Ze),_(Ie),_(q),_(Ge),_(Xe,e),_(Be),_(xe),_(K),_(We),_(ke),_(Ve),_(Ce,e),_(Re),_(Le),_(Ne),_(Ye),_(Fe),_(He),_(Ee),_(Qe,e),_($e),_(ze,e)}}}const po='{"title":"LTX-Video","local":"ltx-video","sections":[{"title":"Notes","local":"notes","sections":[],"depth":2},{"title":"LTXPipeline","local":"diffusers.LTXPipeline","sections":[],"depth":2},{"title":"LTXImageToVideoPipeline","local":"diffusers.LTXImageToVideoPipeline","sections":[],"depth":2},{"title":"LTXConditionPipeline","local":"diffusers.LTXConditionPipeline","sections":[],"depth":2},{"title":"LTXLatentUpsamplePipeline","local":"diffusers.LTXLatentUpsamplePipeline","sections":[],"depth":2},{"title":"LTXPipelineOutput","local":"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput","sections":[],"depth":2}],"depth":1}';function co(G){return Ss(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class To extends Ps{constructor(r){super(),As(this,r,co,ro,zs,{})}}export{To as component}; | |
Xet Storage Details
- Size:
- 143 kB
- Xet hash:
- 0ff27df49c67a04b42de2fde4d38bc2480ee7eb2e9872928e9424b375b8fb682
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.