# LTX Video

![LoRA](https://img.shields.io/badge/LoRA-d8b4fe?style=flat) ![MPS](https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white)

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

[LTX Video](https://huggingface.co/Lightricks/LTX-Video) is the first DiT-based video generation model capable of generating high-quality videos in real time. It produces 24 FPS videos at a 768x512 resolution faster than they can be watched. Trained on a large-scale dataset of diverse videos, the model generates high-resolution videos with realistic and varied content. We provide a model for both text-to-video as well as image + text-to-video use cases.

Available models:

| Model name | Recommended dtype |
|:---:|:---:|
| [`LTX Video 2B 0.9.0`](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.safetensors) | `torch.bfloat16` |
| [`LTX Video 2B 0.9.1`](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.1.safetensors) | `torch.bfloat16` |
| [`LTX Video 2B 0.9.5`](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.5.safetensors) | `torch.bfloat16` |
| [`LTX Video 13B 0.9.7`](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-13b-0.9.7-dev.safetensors) | `torch.bfloat16` |
| [`LTX Video Spatial Upscaler 0.9.7`](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-spatial-upscaler-0.9.7.safetensors) | `torch.bfloat16` |

Note: The recommended dtype is for the transformer component. The VAE and text encoders can be either `torch.float32`, `torch.bfloat16` or `torch.float16`, but the recommended dtype is `torch.bfloat16` as used in the original repository.

The examples below show the basic text-to-video, image-to-video, and image/video-conditioned generation workflows.

Text-to-video with [`LTXPipeline`]:

```python
>>> import torch
>>> from diffusers import LTXPipeline
>>> from diffusers.utils import export_to_video

>>> pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")

>>> prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage"
>>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"

>>> video = pipe(
...     prompt=prompt,
...     negative_prompt=negative_prompt,
...     width=704,
...     height=480,
...     num_frames=161,
...     num_inference_steps=50,
... ).frames[0]
>>> export_to_video(video, "output.mp4", fps=24)
```

Image-to-video with [`LTXImageToVideoPipeline`]:

```python
>>> import torch
>>> from diffusers import LTXImageToVideoPipeline
>>> from diffusers.utils import export_to_video, load_image

>>> pipe = LTXImageToVideoPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")

>>> image = load_image(
...     "https://huggingface.co/datasets/a-r-r-o-w/tiny-meme-dataset-captioned/resolve/main/images/8.png"
... )
>>> prompt = "A young girl stands calmly in the foreground, looking directly at the camera, as a house fire rages in the background. Flames engulf the structure, with smoke billowing into the air. Firefighters in protective gear rush to the scene, a fire truck labeled '38' visible behind them. The girl's neutral expression contrasts sharply with the chaos of the fire, creating a poignant and emotionally charged scene."
>>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"

>>> video = pipe(
...     image=image,
...     prompt=prompt,
...     negative_prompt=negative_prompt,
...     width=704,
...     height=480,
...     num_frames=161,
...     num_inference_steps=50,
... ).frames[0]
>>> export_to_video(video, "output.mp4", fps=24)
```

Image- and video-conditioned generation with [`LTXConditionPipeline`]:

```python
>>> import torch
>>> from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXConditionPipeline, LTXVideoCondition
>>> from diffusers.utils import export_to_video, load_video, load_image

>>> pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.5", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")

>>> # Load input image and video
>>> video = load_video(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input-vid.mp4"
... )
>>> image = load_image(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input.jpg"
... )

>>> # Create conditioning objects
>>> condition1 = LTXVideoCondition(
...     image=image,
...     frame_index=0,
... )
>>> condition2 = LTXVideoCondition(
...     video=video,
...     frame_index=80,
... )

>>> prompt = "The video depicts a long, straight highway stretching into the distance, flanked by metal guardrails. The road is divided into multiple lanes, with a few vehicles visible in the far distance. The surrounding landscape features dry, grassy fields on one side and rolling hills on the other. The sky is mostly clear with a few scattered clouds, suggesting a bright, sunny day. And then the camera switch to a winding mountain road covered in snow, with a single vehicle traveling along it. The road is flanked by steep, rocky cliffs and sparse vegetation. The landscape is characterized by rugged terrain and a river visible in the distance. The scene captures the solitude and beauty of a winter drive through a mountainous region."
>>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"

>>> # Generate video
>>> generator = torch.Generator("cuda").manual_seed(0)
>>> # Text-only conditioning is also supported without the need to pass `conditions`
>>> video = pipe(
...     conditions=[condition1, condition2],
...     prompt=prompt,
...     negative_prompt=negative_prompt,
...     width=768,
...     height=512,
...     num_frames=161,
...     num_inference_steps=40,
...     generator=generator,
... ).frames[0]
>>> export_to_video(video, "output.mp4", fps=24)
```
## Recommended settings for generation

For the best results, it is recommended to follow the guidelines mentioned in the official LTX Video [repository](https://github.com/Lightricks/LTX-Video).

- Some variants of LTX Video are guidance-distilled. For guidance-distilled models, `guidance_scale` must be set to `1.0`. For any other models, `guidance_scale` should be set higher (e.g., `5.0`) for good generation quality.
- For variants with a timestep-aware VAE (LTXV 0.9.1 and above), it is recommended to set `decode_timestep` to `0.05` and `image_cond_noise_scale` to `0.025`, as shown in the sketch after this list.
- For variants that support interpolation between multiple conditioning images and videos (LTXV 0.9.5 and above), it is recommended to use similar looking images/videos for the best results. High divergence between the conditionings may lead to abrupt transitions in the generated video.
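The sketch below is illustrative rather than taken from the official repository: it shows where these settings plug into a conditioning pipeline call. The prompt and conditioning image are placeholders; `decode_timestep` and `image_cond_noise_scale` are the same call arguments used in the LTX Video 13B 0.9.7 example below, and `guidance_scale` is a standard pipeline call argument (documented for [`LTXPipeline`] at the bottom of this page).

```python
import torch
from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXConditionPipeline, LTXVideoCondition
from diffusers.utils import export_to_video, load_image

# Illustrative sketch: apply the recommended settings to a single-image conditioning run.
pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.5", torch_dtype=torch.bfloat16)
pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input.jpg"
)
condition = LTXVideoCondition(image=image, frame_index=0)

video = pipe(
    conditions=[condition],
    prompt="A calm mountain lake at sunrise",  # placeholder prompt
    negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
    guidance_scale=5.0,            # use 1.0 instead for guidance-distilled variants
    decode_timestep=0.05,          # recommended for timestep-aware VAEs (LTXV 0.9.1 and above)
    image_cond_noise_scale=0.025,  # recommended for timestep-aware VAEs (LTXV 0.9.1 and above)
    width=704,
    height=480,
    num_frames=161,
    num_inference_steps=50,
).frames[0]
export_to_video(video, "output.mp4", fps=24)
```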
## Using LTX Video 13B 0.9.7

LTX Video 0.9.7 comes with a spatial latent upscaler and a 13B parameter transformer. The inference involves generating a low resolution video first, which is very fast, followed by upscaling and refining the generated video.
```python
import torch
from diffusers import LTXConditionPipeline, LTXLatentUpsamplePipeline
from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition
from diffusers.utils import export_to_video, load_video

pipe = LTXConditionPipeline.from_pretrained("a-r-r-o-w/LTX-Video-0.9.7-diffusers", torch_dtype=torch.bfloat16)
pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained("a-r-r-o-w/LTX-Video-0.9.7-Latent-Spatial-Upsampler-diffusers", vae=pipe.vae, torch_dtype=torch.bfloat16)
pipe.to("cuda")
pipe_upsample.to("cuda")
pipe.vae.enable_tiling()

def round_to_nearest_resolution_acceptable_by_vae(height, width):
    height = height - (height % pipe.vae_temporal_compression_ratio)
    width = width - (width % pipe.vae_temporal_compression_ratio)
    return height, width

video = load_video(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input-vid.mp4"
)[:21]  # Use only the first 21 frames as conditioning
condition1 = LTXVideoCondition(video=video, frame_index=0)

prompt = "The video depicts a winding mountain road covered in snow, with a single vehicle traveling along it. The road is flanked by steep, rocky cliffs and sparse vegetation. The landscape is characterized by rugged terrain and a river visible in the distance. The scene captures the solitude and beauty of a winter drive through a mountainous region."
negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"
expected_height, expected_width = 768, 1152
downscale_factor = 2 / 3
num_frames = 161

# Part 1. Generate video at smaller resolution
# Text-only conditioning is also supported without the need to pass `conditions`
downscaled_height, downscaled_width = int(expected_height * downscale_factor), int(expected_width * downscale_factor)
downscaled_height, downscaled_width = round_to_nearest_resolution_acceptable_by_vae(downscaled_height, downscaled_width)
latents = pipe(
    conditions=[condition1],
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=downscaled_width,
    height=downscaled_height,
    num_frames=num_frames,
    num_inference_steps=30,
    generator=torch.Generator().manual_seed(0),
    output_type="latent",
).frames

# Part 2. Upscale generated video using latent upsampler with fewer inference steps
# The available latent upsampler upscales the height/width by 2x
upscaled_height, upscaled_width = downscaled_height * 2, downscaled_width * 2
upscaled_latents = pipe_upsample(
    latents=latents,
    output_type="latent"
).frames

# Part 3. Denoise the upscaled video with few steps to improve texture (optional, but recommended)
video = pipe(
    conditions=[condition1],
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=upscaled_width,
    height=upscaled_height,
    num_frames=num_frames,
    denoise_strength=0.4,  # Effectively, 4 inference steps out of 10
    num_inference_steps=10,
    latents=upscaled_latents,
    decode_timestep=0.05,
    image_cond_noise_scale=0.025,
    generator=torch.Generator().manual_seed(0),
    output_type="pil",
).frames[0]

# Part 4. Downscale the video to the expected resolution
video = [frame.resize((expected_width, expected_height)) for frame in video]

export_to_video(video, "output.mp4", fps=24)
```

## Loading Single Files

Loading the original LTX Video checkpoints is also possible with [`~ModelMixin.from_single_file`]. We recommend using `from_single_file` for the Lightricks series of models, as they plan to release multiple models in the future in the single file format.
```python
import torch
from diffusers import AutoencoderKLLTXVideo, LTXImageToVideoPipeline, LTXVideoTransformer3DModel

# `single_file_url` could also be https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.1.safetensors
single_file_url = "https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.safetensors"
transformer = LTXVideoTransformer3DModel.from_single_file(
    single_file_url, torch_dtype=torch.bfloat16
)
vae = AutoencoderKLLTXVideo.from_single_file(single_file_url, torch_dtype=torch.bfloat16)
pipe = LTXImageToVideoPipeline.from_pretrained(
    "Lightricks/LTX-Video", transformer=transformer, vae=vae, torch_dtype=torch.bfloat16
)

# ... inference code ...
```

Alternatively, the pipeline can be used to load the weights with [`~FromSingleFileMixin.from_single_file`].
```python
import torch
from diffusers import LTXImageToVideoPipeline
from transformers import T5EncoderModel, T5Tokenizer

single_file_url = "https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.safetensors"
text_encoder = T5EncoderModel.from_pretrained(
    "Lightricks/LTX-Video", subfolder="text_encoder", torch_dtype=torch.bfloat16
)
tokenizer = T5Tokenizer.from_pretrained(
    "Lightricks/LTX-Video", subfolder="tokenizer", torch_dtype=torch.bfloat16
)
pipe = LTXImageToVideoPipeline.from_single_file(
    single_file_url, text_encoder=text_encoder, tokenizer=tokenizer, torch_dtype=torch.bfloat16
)
```

Loading [LTX GGUF checkpoints](https://huggingface.co/city96/LTX-Video-gguf) is also supported:
```python
import torch
from diffusers.utils import export_to_video
from diffusers import LTXPipeline, LTXVideoTransformer3DModel, GGUFQuantizationConfig

ckpt_path = (
    "https://huggingface.co/city96/LTX-Video-gguf/blob/main/ltx-video-2b-v0.9-Q3_K_S.gguf"
)
transformer = LTXVideoTransformer3DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)
pipe = LTXPipeline.from_pretrained(
    "Lightricks/LTX-Video",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)
pipe.enable_model_cpu_offload()

prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage"
negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"

video = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=704,
    height=480,
    num_frames=161,
    num_inference_steps=50,
).frames[0]
export_to_video(video, "output_gguf_ltx.mp4", fps=24)
```

Make sure to read the [documentation on GGUF](../../quantization/gguf) to learn more about our GGUF support.

Loading and running inference with [LTX Video 0.9.1](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.1.safetensors) weights:
```python
import torch
from diffusers import LTXPipeline
from diffusers.utils import export_to_video

pipe = LTXPipeline.from_pretrained("a-r-r-o-w/LTX-Video-0.9.1-diffusers", torch_dtype=torch.bfloat16)
pipe.to("cuda")

prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage"
negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"

video = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=768,
    height=512,
    num_frames=161,
    decode_timestep=0.03,
    decode_noise_scale=0.025,
    num_inference_steps=50,
).frames[0]
export_to_video(video, "output.mp4", fps=24)
```

Refer to [this section](https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogvideox#memory-optimization) to learn more about optimizing memory consumption.
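As a quick illustration (a sketch, not taken from the linked guide), two memory-saving switches that already appear in the examples above can be combined on a single pipeline:

```python
import torch
from diffusers import LTXPipeline

pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)

pipe.enable_model_cpu_offload()  # keep sub-models on the CPU and move each to the GPU only while it runs
pipe.vae.enable_tiling()         # decode the latent video in tiles to lower peak VAE memory
```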
## Quantization

Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.

Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LTXPipeline`] for inference with bitsandbytes.

```python
import torch
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, LTXVideoTransformer3DModel, LTXPipeline
from diffusers.utils import export_to_video
from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel

quant_config = BitsAndBytesConfig(load_in_8bit=True)
text_encoder_8bit = T5EncoderModel.from_pretrained(
    "Lightricks/LTX-Video",
    subfolder="text_encoder",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)

quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
transformer_8bit = LTXVideoTransformer3DModel.from_pretrained(
    "Lightricks/LTX-Video",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)

pipeline = LTXPipeline.from_pretrained(
    "Lightricks/LTX-Video",
    text_encoder=text_encoder_8bit,
    transformer=transformer_8bit,
    torch_dtype=torch.float16,
    device_map="balanced",
)

prompt = "A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea. The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse. Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood, with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting."
video = pipeline(prompt=prompt, num_frames=161, num_inference_steps=50).frames[0]
export_to_video(video, "ship.mp4", fps=24)
```

## LTXPipeline

```python
class diffusers.LTXPipeline(
    scheduler: FlowMatchEulerDiscreteScheduler,
    vae: AutoencoderKLLTXVideo,
    text_encoder: T5EncoderModel,
    tokenizer: T5TokenizerFast,
    transformer: LTXVideoTransformer3DModel,
)
```

Pipeline for text-to-video generation.

Reference: https://github.com/Lightricks/LTX-Video

Parameters:
| Conditional Transformer architecture to denoise the encoded video latents.`,name:"transformer"},{anchor:"diffusers.LTXPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11477/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.LTXPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11477/en/api/models/autoencoderkl_ltx_video#diffusers.AutoencoderKLLTXVideo">AutoencoderKLLTXVideo</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LTXPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LTXPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx.py#L143"}}),Ie=new U({props:{name:"__call__",anchor:"diffusers.LTXPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"num_frames",val:": int = 161"},{name:"frame_rate",val:": int = 25"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 3"},{name:"num_videos_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 128"}],parametersDescription:[{anchor:"diffusers.LTXPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the video generation. If not defined, one has to pass <code>prompt_embeds</code> | |
| instead.`,name:"prompt"},{anchor:"diffusers.LTXPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, defaults to <code>512</code>) — | |
| The height in pixels of the generated video. A height of 480 is recommended for the best results.`,name:"height"},{anchor:"diffusers.LTXPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, defaults to <code>704</code>) — | |
| The width in pixels of the generated video. A width of 704 is recommended for the best results.`,name:"width"},{anchor:"diffusers.LTXPipeline.__call__.num_frames",description:`<strong>num_frames</strong> (<code>int</code>, defaults to <code>161</code>) — | |
| The number of video frames to generate`,name:"num_frames"},{anchor:"diffusers.LTXPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.LTXPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.LTXPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, defaults to <code>3 </code>) — | |
| Guidance scale as defined in <a href="https://arxiv.org/abs/2207.12598" rel="nofollow">Classifier-Free Diffusion Guidance</a>. | |
| <code>guidance_scale</code> is defined as <code>w</code> of equation 2. of <a href="https://arxiv.org/pdf/2205.11487.pdf" rel="nofollow">Imagen | |
| Paper</a>. Guidance scale is enabled by setting <code>guidance_scale > 1</code>. A higher guidance scale encourages the model to generate videos that are closely linked to the text <code>prompt</code>, | |
| usually at the expense of lower video quality.`,name:"guidance_scale"},{anchor:"diffusers.LTXPipeline.__call__.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of videos to generate per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.LTXPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.LTXPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for text embeddings.`,name:"prompt_attention_mask"},{anchor:"diffusers.LTXPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. If not | |
| provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.LTXPipeline.__call__.decode_timestep",description:`<strong>decode_timestep</strong> (<code>float</code>, defaults to <code>0.0</code>) — | |
| The timestep at which generated video is decoded.`,name:"decode_timestep"},{anchor:"diffusers.LTXPipeline.__call__.decode_noise_scale",description:`<strong>decode_noise_scale</strong> (<code>float</code>, defaults to <code>None</code>) — | |
| The interpolation factor between random noise and denoised latents at the decode timestep.`,name:"decode_noise_scale"},{anchor:"diffusers.LTXPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated video. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LTXPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.ltx.LTXPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.LTXPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.LTXPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that is called at the end of each denoising step during inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LTXPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.LTXPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>128 </code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx.py#L504",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <code>~pipelines.ltx.LTXPipelineOutput</code> is returned, otherwise a <code>tuple</code> is | |
| returned where the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.ltx.LTXPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),E=new Dn({props:{anchor:"diffusers.LTXPipeline.__call__.example",$$slots:{default:[Eo]},$$scope:{ctx:k}}}),Xe=new U({props:{name:"encode_prompt",anchor:"diffusers.LTXPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_videos_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 128"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"dtype",val:": typing.Optional[torch.dtype] = None"}],parametersDescription:[{anchor:"diffusers.LTXPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.LTXPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LTXPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to use classifier free guidance or not.`,name:"do_classifier_free_guidance"},{anchor:"diffusers.LTXPipeline.encode_prompt.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| Number of videos that should be generated per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device`,name:"device"},{anchor:"diffusers.LTXPipeline.encode_prompt.dtype",description:`<strong>dtype</strong> — (<code>torch.dtype</code>, <em>optional</em>): | |
| torch dtype`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx.py#L256"}}),Ge=new R({props:{title:"LTXImageToVideoPipeline",local:"diffusers.LTXImageToVideoPipeline",headingTag:"h2"}}),xe=new U({props:{name:"class diffusers.LTXImageToVideoPipeline",anchor:"diffusers.LTXImageToVideoPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLLTXVideo"},{name:"text_encoder",val:": T5EncoderModel"},{name:"tokenizer",val:": T5TokenizerFast"},{name:"transformer",val:": LTXVideoTransformer3DModel"}],parametersDescription:[{anchor:"diffusers.LTXImageToVideoPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11477/en/api/models/ltx_video_transformer3d#diffusers.LTXVideoTransformer3DModel">LTXVideoTransformer3DModel</a>) — | |
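As a complement to the `encode_prompt` reference above, the sketch below pre-computes the text embeddings once and reuses them across several seeds via the `prompt_embeds`/`prompt_attention_mask` arguments of `__call__`. It assumes `encode_prompt` returns the four tensors in the order `(prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask)`, matching the source linked above; the prompts and seeds are placeholders.

```python
import torch
from diffusers import LTXPipeline
from diffusers.utils import export_to_video

pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16).to("cuda")

# Encode once; assumed return order: prompt_embeds, prompt_attention_mask,
# negative_prompt_embeds, negative_prompt_attention_mask.
prompt_embeds, prompt_mask, neg_embeds, neg_mask = pipe.encode_prompt(
    prompt="A lighthouse on a cliff during a storm, waves crashing below",
    negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
    do_classifier_free_guidance=True,
    max_sequence_length=128,
    device="cuda",
)

# Reuse the cached embeddings for several seeds without re-running the text encoder.
for seed in (0, 1, 2):
    video = pipe(
        prompt_embeds=prompt_embeds,
        prompt_attention_mask=prompt_mask,
        negative_prompt_embeds=neg_embeds,
        negative_prompt_attention_mask=neg_mask,
        num_frames=65,
        num_inference_steps=30,
        generator=torch.Generator("cuda").manual_seed(seed),
    ).frames[0]
    export_to_video(video, f"seed_{seed}.mp4", fps=24)
```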
| Conditional Transformer architecture to denoise the encoded video latents.`,name:"transformer"},{anchor:"diffusers.LTXImageToVideoPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11477/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.LTXImageToVideoPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11477/en/api/models/autoencoderkl_ltx_video#diffusers.AutoencoderKLLTXVideo">AutoencoderKLLTXVideo</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LTXImageToVideoPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LTXImageToVideoPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py#L162"}}),Be=new U({props:{name:"__call__",anchor:"diffusers.LTXImageToVideoPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"num_frames",val:": int = 161"},{name:"frame_rate",val:": int = 25"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 3"},{name:"num_videos_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 128"}],parametersDescription:[{anchor:"diffusers.LTXImageToVideoPipeline.__call__.image",description:`<strong>image</strong> (<code>PipelineImageInput</code>) — | |
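For `LTXImageToVideoPipeline`, documented next, a minimal usage sketch mirrors the earlier text-to-video example with an added conditioning image; the image URL and prompt are placeholders.

```python
import torch
from diffusers import LTXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

pipe = LTXImageToVideoPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Placeholder conditioning image: any RGB still of the desired first frame.
image = load_image("https://example.com/first_frame.png")
prompt = "The camera slowly pulls back from the scene as snow begins to fall"

video = pipe(
    image=image,
    prompt=prompt,
    negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
    width=704,
    height=480,
    num_frames=161,
    num_inference_steps=50,
).frames[0]
export_to_video(video, "output.mp4", fps=24)
```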
| The input image to condition the generation on. Must be an image, a list of images or a <code>torch.Tensor</code>.`,name:"image"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the video generation. If not defined, one has to pass <code>prompt_embeds</code> | |
| instead.`,name:"prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, defaults to <code>512</code>) — | |
| The height in pixels of the generated video. A height of 480 is recommended for the best results.`,name:"height"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, defaults to <code>704</code>) — | |
| The width in pixels of the generated video. A width of 704 is recommended for the best results.`,name:"width"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.num_frames",description:`<strong>num_frames</strong> (<code>int</code>, defaults to <code>161</code>) — | |
| The number of video frames to generate`,name:"num_frames"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, defaults to <code>3 </code>) — | |
| Guidance scale as defined in <a href="https://arxiv.org/abs/2207.12598" rel="nofollow">Classifier-Free Diffusion Guidance</a>. | |
| <code>guidance_scale</code> is defined as <code>w</code> of equation 2. of <a href="https://arxiv.org/pdf/2205.11487.pdf" rel="nofollow">Imagen | |
| Paper</a>. Guidance scale is enabled by setting <code>guidance_scale > 1</code>. A higher guidance scale encourages the model to generate videos that are closely linked to the text <code>prompt</code>, | |
| usually at the expense of lower video quality.`,name:"guidance_scale"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of videos to generate per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for text embeddings.`,name:"prompt_attention_mask"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. If not | |
| provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.decode_timestep",description:`<strong>decode_timestep</strong> (<code>float</code>, defaults to <code>0.0</code>) — | |
| The timestep at which generated video is decoded.`,name:"decode_timestep"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.decode_noise_scale",description:`<strong>decode_noise_scale</strong> (<code>float</code>, defaults to <code>None</code>) — | |
| The interpolation factor between random noise and denoised latents at the decode timestep.`,name:"decode_noise_scale"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated video. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.ltx.LTXPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that is called at the end of each denoising step during inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.LTXImageToVideoPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>128 </code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py#L565",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <code>~pipelines.ltx.LTXPipelineOutput</code> is returned, otherwise a <code>tuple</code> is | |
| returned where the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.ltx.LTXPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),Y=new Dn({props:{anchor:"diffusers.LTXImageToVideoPipeline.__call__.example",$$slots:{default:[zo]},$$scope:{ctx:k}}}),Ve=new U({props:{name:"encode_prompt",anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_videos_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 128"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"dtype",val:": typing.Optional[torch.dtype] = None"}],parametersDescription:[{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to use classifier free guidance or not.`,name:"do_classifier_free_guidance"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| Number of videos that should be generated per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device`,name:"device"},{anchor:"diffusers.LTXImageToVideoPipeline.encode_prompt.dtype",description:`<strong>dtype</strong> — (<code>torch.dtype</code>, <em>optional</em>): | |
| torch dtype`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py#L279"}}),ke=new R({props:{title:"LTXConditionPipeline",local:"diffusers.LTXConditionPipeline",headingTag:"h2"}}),We=new U({props:{name:"class diffusers.LTXConditionPipeline",anchor:"diffusers.LTXConditionPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLLTXVideo"},{name:"text_encoder",val:": T5EncoderModel"},{name:"tokenizer",val:": T5TokenizerFast"},{name:"transformer",val:": LTXVideoTransformer3DModel"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11477/en/api/models/ltx_video_transformer3d#diffusers.LTXVideoTransformer3DModel">LTXVideoTransformer3DModel</a>) — | |
| Conditional Transformer architecture to denoise the encoded video latents.`,name:"transformer"},{anchor:"diffusers.LTXConditionPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11477/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.LTXConditionPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11477/en/api/models/autoencoderkl_ltx_video#diffusers.AutoencoderKLLTXVideo">AutoencoderKLLTXVideo</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LTXConditionPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LTXConditionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L225"}}),Le=new U({props:{name:"__call__",anchor:"diffusers.LTXConditionPipeline.__call__",parameters:[{name:"conditions",val:": typing.Union[diffusers.pipelines.ltx.pipeline_ltx_condition.LTXVideoCondition, typing.List[diffusers.pipelines.ltx.pipeline_ltx_condition.LTXVideoCondition]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], typing.List[typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]]] = None"},{name:"video",val:": typing.List[typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]] = None"},{name:"frame_index",val:": typing.Union[int, typing.List[int]] = 0"},{name:"strength",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"denoise_strength",val:": float = 1.0"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"num_frames",val:": int = 161"},{name:"frame_rate",val:": int = 25"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 3"},{name:"image_cond_noise_scale",val:": float = 0.15"},{name:"num_videos_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 256"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.__call__.conditions",description:`<strong>conditions</strong> (<code>List[LTXVideoCondition], *optional*</code>) — | |
| The list of frame-conditioning items for the video generation. If not provided, conditions will be | |
| created using <code>image</code>, <code>video</code>, <code>frame_index</code> and <code>strength</code>.`,name:"conditions"},{anchor:"diffusers.LTXConditionPipeline.__call__.image",description:`<strong>image</strong> (<code>PipelineImageInput</code> or <code>List[PipelineImageInput]</code>, <em>optional</em>) — | |
| The image or images to condition the video generation. If not provided, one has to pass <code>video</code> or | |
| <code>conditions</code>.`,name:"image"},{anchor:"diffusers.LTXConditionPipeline.__call__.video",description:`<strong>video</strong> (<code>List[PipelineImageInput]</code>, <em>optional</em>) — | |
| The video to condition the video generation. If not provided, one has to pass <code>image</code> or <code>conditions</code>.`,name:"video"},{anchor:"diffusers.LTXConditionPipeline.__call__.frame_index",description:`<strong>frame_index</strong> (<code>int</code> or <code>List[int]</code>, <em>optional</em>) — | |
| The frame index or frame indices at which the image or video will conditionally affect the video | |
| generation. If not provided, one has to pass <code>conditions</code>.`,name:"frame_index"},{anchor:"diffusers.LTXConditionPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>) — | |
| The strength or strengths of the conditioning effect. If not provided, one has to pass <code>conditions</code>.`,name:"strength"},{anchor:"diffusers.LTXConditionPipeline.__call__.denoise_strength",description:`<strong>denoise_strength</strong> (<code>float</code>, defaults to <code>1.0</code>) — | |
| The strength of the noise added to the latents for editing. A higher strength adds more noise to the | |
| latents, leading to larger differences between the original and generated videos. This | |
| is useful for video-to-video editing.`,name:"denoise_strength"},{anchor:"diffusers.LTXConditionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the video generation. If not defined, one has to pass <code>prompt_embeds</code> | |
| instead.`,name:"prompt"},{anchor:"diffusers.LTXConditionPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, defaults to <code>512</code>) — | |
| The height in pixels of the generated video. A height of 480 is recommended for the best results.`,name:"height"},{anchor:"diffusers.LTXConditionPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, defaults to <code>704</code>) — | |
| The width in pixels of the generated video. A width of 704 is recommended for the best results.`,name:"width"},{anchor:"diffusers.LTXConditionPipeline.__call__.num_frames",description:`<strong>num_frames</strong> (<code>int</code>, defaults to <code>161</code>) — | |
| The number of video frames to generate`,name:"num_frames"},{anchor:"diffusers.LTXConditionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.LTXConditionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.LTXConditionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, defaults to <code>3 </code>) — | |
| Guidance scale as defined in <a href="https://arxiv.org/abs/2207.12598" rel="nofollow">Classifier-Free Diffusion Guidance</a>. | |
| <code>guidance_scale</code> is defined as <code>w</code> of equation 2. of <a href="https://arxiv.org/pdf/2205.11487.pdf" rel="nofollow">Imagen | |
| Paper</a>. Guidance scale is enabled by setting <code>guidance_scale > 1</code>. A higher guidance scale encourages the model to generate videos that are closely linked to the text <code>prompt</code>, | |
| usually at the expense of lower video quality.`,name:"guidance_scale"},{anchor:"diffusers.LTXConditionPipeline.__call__.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of videos to generate per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXConditionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.LTXConditionPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.LTXConditionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for text embeddings.`,name:"prompt_attention_mask"},{anchor:"diffusers.LTXConditionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. If not | |
| provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.LTXConditionPipeline.__call__.decode_timestep",description:`<strong>decode_timestep</strong> (<code>float</code>, defaults to <code>0.0</code>) — | |
| The timestep at which generated video is decoded.`,name:"decode_timestep"},{anchor:"diffusers.LTXConditionPipeline.__call__.decode_noise_scale",description:`<strong>decode_noise_scale</strong> (<code>float</code>, defaults to <code>None</code>) — | |
| The interpolation factor between random noise and denoised latents at the decode timestep.`,name:"decode_noise_scale"},{anchor:"diffusers.LTXConditionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated video. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LTXConditionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.ltx.LTXPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.LTXConditionPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.LTXConditionPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that is called at the end of each denoising step during inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LTXConditionPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.LTXConditionPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code>, defaults to <code>256</code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L817",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <code>~pipelines.ltx.LTXPipelineOutput</code> is returned, otherwise a <code>tuple</code> is | |
| returned where the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.ltx.LTXPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),P=new Dn({props:{anchor:"diffusers.LTXConditionPipeline.__call__.example",$$slots:{default:[Yo]},$$scope:{ctx:k}}}),Ce=new U({props:{name:"add_noise_to_image_conditioning_latents",anchor:"diffusers.LTXConditionPipeline.add_noise_to_image_conditioning_latents",parameters:[{name:"t",val:": float"},{name:"init_latents",val:": Tensor"},{name:"latents",val:": Tensor"},{name:"noise_scale",val:": float"},{name:"conditioning_mask",val:": Tensor"},{name:"generator",val:""},{name:"eps",val:" = 1e-06"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L619"}}),Re=new U({props:{name:"encode_prompt",anchor:"diffusers.LTXConditionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_videos_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 256"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"dtype",val:": typing.Optional[torch.dtype] = None"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
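To make the conditioning inputs above concrete, here is a hedged sketch that conditions generation on a single still image through `LTXVideoCondition` (imported from the module path shown in the call signature). The checkpoint name, image URL, and prompt are assumptions; use an LTX-Video release that actually ships the condition pipeline components, and note that the `image`/`frame_index` fields are inferred from the parameter documentation above.

```python
import torch
from diffusers import LTXConditionPipeline
from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition
from diffusers.utils import export_to_video, load_image

# Assumed checkpoint; substitute a release supported by LTXConditionPipeline.
pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.5", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Condition the first generated frame on a still image (placeholder URL).
image = load_image("https://example.com/first_frame.png")
condition = LTXVideoCondition(image=image, frame_index=0)

video = pipe(
    conditions=[condition],
    prompt="The scene slowly comes to life as lights flicker on one by one",
    negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
    width=704,
    height=480,
    num_frames=161,
    num_inference_steps=40,
).frames[0]
export_to_video(video, "conditioned.mp4", fps=24)
```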
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to use classifier free guidance or not.`,name:"do_classifier_free_guidance"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.num_videos_per_prompt",description:`<strong>num_videos_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| Number of videos that should be generated per prompt.`,name:"num_videos_per_prompt"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device`,name:"device"},{anchor:"diffusers.LTXConditionPipeline.encode_prompt.dtype",description:`<strong>dtype</strong> — (<code>torch.dtype</code>, <em>optional</em>): | |
| torch dtype`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L342"}}),Ne=new U({props:{name:"trim_conditioning_sequence",anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence",parameters:[{name:"start_frame",val:": int"},{name:"sequence_num_frames",val:": int"},{name:"target_num_frames",val:": int"}],parametersDescription:[{anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence.start_frame",description:"<strong>start_frame</strong> (int) — The target frame number of the first frame in the sequence.",name:"start_frame"},{anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence.sequence_num_frames",description:"<strong>sequence_num_frames</strong> (int) — The number of frames in the sequence.",name:"sequence_num_frames"},{anchor:"diffusers.LTXConditionPipeline.trim_conditioning_sequence.target_num_frames",description:"<strong>target_num_frames</strong> (int) — The target number of frames in the generated video.",name:"target_num_frames"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_condition.py#L602",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>updated sequence length</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>int</p> | |
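The `LTXLatentUpsamplePipeline` documented below operates on latents rather than decoded frames, so a typical flow is to run a base LTX pipeline with `output_type="latent"` and hand the result to the upsampler. The sketch below illustrates that hand-off under stated assumptions: the upsampler checkpoint name is a guess and should be replaced with the spatial-upscaler weights matching your LTX-Video release, and it assumes the base pipeline's latent output can be passed directly as `latents=`.

```python
import torch
from diffusers import LTXPipeline, LTXLatentUpsamplePipeline
from diffusers.utils import export_to_video

pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16).to("cuda")

# Assumed upsampler checkpoint; reuse the base VAE so latents decode consistently.
pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained(
    "Lightricks/ltxv-spatial-upscaler-0.9.7", vae=pipe.vae, torch_dtype=torch.bfloat16
).to("cuda")

# 1) Generate low-resolution latents instead of decoded frames.
latents = pipe(
    prompt="A hot air balloon drifting over terraced rice fields at sunrise",  # placeholder
    width=512,
    height=320,
    num_frames=65,
    num_inference_steps=30,
    output_type="latent",
).frames

# 2) Upsample the latents and decode them to frames.
video = pipe_upsample(latents=latents, output_type="pil").frames[0]
export_to_video(video, "upsampled.mp4", fps=24)
```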
| `}}),Fe=new R({props:{title:"LTXLatentUpsamplePipeline",local:"diffusers.LTXLatentUpsamplePipeline",headingTag:"h2"}}),He=new U({props:{name:"class diffusers.LTXLatentUpsamplePipeline",anchor:"diffusers.LTXLatentUpsamplePipeline",parameters:[{name:"vae",val:": AutoencoderKLLTXVideo"},{name:"latent_upsampler",val:": LTXLatentUpsamplerModel"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L46"}}),$e=new U({props:{name:"__call__",anchor:"diffusers.LTXLatentUpsamplePipeline.__call__",parameters:[{name:"video",val:": typing.Optional[typing.List[typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]]] = None"},{name:"height",val:": int = 512"},{name:"width",val:": int = 704"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"decode_timestep",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"decode_noise_scale",val:": typing.Union[float, typing.List[float], NoneType] = None"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L154"}}),Ee=new U({props:{name:"disable_vae_slicing",anchor:"diffusers.LTXLatentUpsamplePipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L123"}}),ze=new U({props:{name:"disable_vae_tiling",anchor:"diffusers.LTXLatentUpsamplePipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L138"}}),Ye=new U({props:{name:"enable_vae_slicing",anchor:"diffusers.LTXLatentUpsamplePipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L116"}}),Qe=new U({props:{name:"enable_vae_tiling",anchor:"diffusers.LTXLatentUpsamplePipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_ltx_latent_upsample.py#L130"}}),Pe=new R({props:{title:"LTXPipelineOutput",local:"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput",headingTag:"h2"}}),Se=new U({props:{name:"class diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput",anchor:"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput",parameters:[{name:"frames",val:": Tensor"}],parametersDescription:[{anchor:"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput.frames",description:`<strong>frames</strong> (<code>torch.Tensor</code>, <code>np.ndarray</code>, or List[List[PIL.Image.Image]]) — | |
List of video outputs - It can be a nested list of length <code>batch_size</code>, with each sub-list containing | |
denoised PIL image sequences of length <code>num_frames</code>. It can also be a NumPy array or Torch tensor of shape | |
| <code>(batch_size, num_frames, channels, height, width)</code>.`,name:"frames"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/ltx/pipeline_output.py#L8"}}),Ae=new Ho({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/ltx_video.md"}}),{c(){f=l("meta"),j=o(),M=l("p"),_=o(),p(T.$$.fragment),d=o(),w=l("div"),w.innerHTML=On,Tt=o(),te=l("p"),te.innerHTML=Kn,bt=o(),p($.$$.fragment),wt=o(),ne=l("p"),ne.textContent=eo,Jt=o(),oe=l("table"),oe.innerHTML=to,vt=o(),se=l("p"),se.innerHTML=no,Ut=o(),p(ae.$$.fragment),Zt=o(),ie=l("p"),ie.innerHTML=oo,jt=o(),le=l("ul"),le.innerHTML=so,It=o(),p(re.$$.fragment),Xt=o(),de=l("p"),de.textContent=ao,Gt=o(),p(pe.$$.fragment),xt=o(),p(ce.$$.fragment),Bt=o(),me=l("p"),me.innerHTML=io,Vt=o(),p(ge.$$.fragment),kt=o(),he=l("p"),he.innerHTML=lo,Wt=o(),p(ue.$$.fragment),Lt=o(),fe=l("p"),fe.innerHTML=ro,Ct=o(),p(ye.$$.fragment),Rt=o(),_e=l("p"),_e.innerHTML=po,Nt=o(),Me=l("p"),Me.innerHTML=co,Ft=o(),p(Te.$$.fragment),Ht=o(),be=l("p"),be.innerHTML=mo,$t=o(),p(we.$$.fragment),Et=o(),Je=l("p"),Je.textContent=go,zt=o(),ve=l("p"),ve.innerHTML=ho,Yt=o(),p(Ue.$$.fragment),Qt=o(),p(Ze.$$.fragment),Pt=o(),I=l("div"),p(je.$$.fragment),fn=o(),De=l("p"),De.textContent=uo,yn=o(),Oe=l("p"),Oe.innerHTML=fo,_n=o(),W=l("div"),p(Ie.$$.fragment),Mn=o(),Ke=l("p"),Ke.textContent=yo,Tn=o(),p(E.$$.fragment),bn=o(),z=l("div"),p(Xe.$$.fragment),wn=o(),et=l("p"),et.textContent=_o,St=o(),p(Ge.$$.fragment),At=o(),X=l("div"),p(xe.$$.fragment),Jn=o(),tt=l("p"),tt.textContent=Mo,vn=o(),nt=l("p"),nt.innerHTML=To,Un=o(),L=l("div"),p(Be.$$.fragment),Zn=o(),ot=l("p"),ot.textContent=bo,jn=o(),p(Y.$$.fragment),In=o(),Q=l("div"),p(Ve.$$.fragment),Xn=o(),st=l("p"),st.textContent=wo,qt=o(),p(ke.$$.fragment),Dt=o(),v=l("div"),p(We.$$.fragment),Gn=o(),at=l("p"),at.textContent=Jo,xn=o(),it=l("p"),it.innerHTML=vo,Bn=o(),C=l("div"),p(Le.$$.fragment),Vn=o(),lt=l("p"),lt.textContent=Uo,kn=o(),p(P.$$.fragment),Wn=o(),S=l("div"),p(Ce.$$.fragment),Ln=o(),rt=l("p"),rt.textContent=Zo,Cn=o(),A=l("div"),p(Re.$$.fragment),Rn=o(),dt=l("p"),dt.textContent=jo,Nn=o(),q=l("div"),p(Ne.$$.fragment),Fn=o(),pt=l("p"),pt.textContent=Io,Ot=o(),p(Fe.$$.fragment),Kt=o(),Z=l("div"),p(He.$$.fragment),Hn=o(),ct=l("div"),p($e.$$.fragment),$n=o(),D=l("div"),p(Ee.$$.fragment),En=o(),mt=l("p"),mt.innerHTML=Xo,zn=o(),O=l("div"),p(ze.$$.fragment),Yn=o(),gt=l("p"),gt.innerHTML=Go,Qn=o(),K=l("div"),p(Ye.$$.fragment),Pn=o(),ht=l("p"),ht.textContent=xo,Sn=o(),ee=l("div"),p(Qe.$$.fragment),An=o(),ut=l("p"),ut.textContent=Bo,en=o(),p(Pe.$$.fragment),tn=o(),N=l("div"),p(Se.$$.fragment),qn=o(),ft=l("p"),ft.textContent=Vo,nn=o(),p(Ae.$$.fragment),on=o(),Mt=l("p"),this.h()},l(e){const 
t=No("svelte-u9bgzb",document.head);f=r(t,"META",{name:!0,content:!0}),t.forEach(n),j=s(e),M=r(e,"P",{}),J(M).forEach(n),_=s(e),c(T.$$.fragment,e),d=s(e),w=r(e,"DIV",{class:!0,"data-svelte-h":!0}),y(w)!=="svelte-1elo7hh"&&(w.innerHTML=On),Tt=s(e),te=r(e,"P",{"data-svelte-h":!0}),y(te)!=="svelte-1t4cyrb"&&(te.innerHTML=Kn),bt=s(e),c($.$$.fragment,e),wt=s(e),ne=r(e,"P",{"data-svelte-h":!0}),y(ne)!=="svelte-1bob28v"&&(ne.textContent=eo),Jt=s(e),oe=r(e,"TABLE",{"data-svelte-h":!0}),y(oe)!=="svelte-1indfqi"&&(oe.innerHTML=to),vt=s(e),se=r(e,"P",{"data-svelte-h":!0}),y(se)!=="svelte-xu6cps"&&(se.innerHTML=no),Ut=s(e),c(ae.$$.fragment,e),Zt=s(e),ie=r(e,"P",{"data-svelte-h":!0}),y(ie)!=="svelte-1giptmx"&&(ie.innerHTML=oo),jt=s(e),le=r(e,"UL",{"data-svelte-h":!0}),y(le)!=="svelte-e50cei"&&(le.innerHTML=so),It=s(e),c(re.$$.fragment,e),Xt=s(e),de=r(e,"P",{"data-svelte-h":!0}),y(de)!=="svelte-3t8vbu"&&(de.textContent=ao),Gt=s(e),c(pe.$$.fragment,e),xt=s(e),c(ce.$$.fragment,e),Bt=s(e),me=r(e,"P",{"data-svelte-h":!0}),y(me)!=="svelte-lk4l6j"&&(me.innerHTML=io),Vt=s(e),c(ge.$$.fragment,e),kt=s(e),he=r(e,"P",{"data-svelte-h":!0}),y(he)!=="svelte-rvy320"&&(he.innerHTML=lo),Wt=s(e),c(ue.$$.fragment,e),Lt=s(e),fe=r(e,"P",{"data-svelte-h":!0}),y(fe)!=="svelte-1r1x4fd"&&(fe.innerHTML=ro),Ct=s(e),c(ye.$$.fragment,e),Rt=s(e),_e=r(e,"P",{"data-svelte-h":!0}),y(_e)!=="svelte-7vi8uq"&&(_e.innerHTML=po),Nt=s(e),Me=r(e,"P",{"data-svelte-h":!0}),y(Me)!=="svelte-1d7qawf"&&(Me.innerHTML=co),Ft=s(e),c(Te.$$.fragment,e),Ht=s(e),be=r(e,"P",{"data-svelte-h":!0}),y(be)!=="svelte-obf3nv"&&(be.innerHTML=mo),$t=s(e),c(we.$$.fragment,e),Et=s(e),Je=r(e,"P",{"data-svelte-h":!0}),y(Je)!=="svelte-1ou2pxc"&&(Je.textContent=go),zt=s(e),ve=r(e,"P",{"data-svelte-h":!0}),y(ve)!=="svelte-1t0dmz8"&&(ve.innerHTML=ho),Yt=s(e),c(Ue.$$.fragment,e),Qt=s(e),c(Ze.$$.fragment,e),Pt=s(e),I=r(e,"DIV",{class:!0});var x=J(I);c(je.$$.fragment,x),fn=s(x),De=r(x,"P",{"data-svelte-h":!0}),y(De)!=="svelte-19ipoo4"&&(De.textContent=uo),yn=s(x),Oe=r(x,"P",{"data-svelte-h":!0}),y(Oe)!=="svelte-1sr6eg8"&&(Oe.innerHTML=fo),_n=s(x),W=r(x,"DIV",{class:!0});var F=J(W);c(Ie.$$.fragment,F),Mn=s(F),Ke=r(F,"P",{"data-svelte-h":!0}),y(Ke)!=="svelte-v78lg8"&&(Ke.textContent=yo),Tn=s(F),c(E.$$.fragment,F),F.forEach(n),bn=s(x),z=r(x,"DIV",{class:!0});var qe=J(z);c(Xe.$$.fragment,qe),wn=s(qe),et=r(qe,"P",{"data-svelte-h":!0}),y(et)!=="svelte-16q0ax1"&&(et.textContent=_o),qe.forEach(n),x.forEach(n),St=s(e),c(Ge.$$.fragment,e),At=s(e),X=r(e,"DIV",{class:!0});var B=J(X);c(xe.$$.fragment,B),Jn=s(B),tt=r(B,"P",{"data-svelte-h":!0}),y(tt)!=="svelte-10tczlw"&&(tt.textContent=Mo),vn=s(B),nt=r(B,"P",{"data-svelte-h":!0}),y(nt)!=="svelte-1sr6eg8"&&(nt.innerHTML=To),Un=s(B),L=r(B,"DIV",{class:!0});var yt=J(L);c(Be.$$.fragment,yt),Zn=s(yt),ot=r(yt,"P",{"data-svelte-h":!0}),y(ot)!=="svelte-v78lg8"&&(ot.textContent=bo),jn=s(yt),c(Y.$$.fragment,yt),yt.forEach(n),In=s(B),Q=r(B,"DIV",{class:!0});var an=J(Q);c(Ve.$$.fragment,an),Xn=s(an),st=r(an,"P",{"data-svelte-h":!0}),y(st)!=="svelte-16q0ax1"&&(st.textContent=wo),an.forEach(n),B.forEach(n),qt=s(e),c(ke.$$.fragment,e),Dt=s(e),v=r(e,"DIV",{class:!0});var G=J(v);c(We.$$.fragment,G),Gn=s(G),at=r(G,"P",{"data-svelte-h":!0}),y(at)!=="svelte-4vzu4m"&&(at.textContent=Jo),xn=s(G),it=r(G,"P",{"data-svelte-h":!0}),y(it)!=="svelte-1sr6eg8"&&(it.innerHTML=vo),Bn=s(G),C=r(G,"DIV",{class:!0});var 
_t=J(C);c(Le.$$.fragment,_t),Vn=s(_t),lt=r(_t,"P",{"data-svelte-h":!0}),y(lt)!=="svelte-v78lg8"&&(lt.textContent=Uo),kn=s(_t),c(P.$$.fragment,_t),_t.forEach(n),Wn=s(G),S=r(G,"DIV",{class:!0});var ln=J(S);c(Ce.$$.fragment,ln),Ln=s(ln),rt=r(ln,"P",{"data-svelte-h":!0}),y(rt)!=="svelte-9ak1um"&&(rt.textContent=Zo),ln.forEach(n),Cn=s(G),A=r(G,"DIV",{class:!0});var rn=J(A);c(Re.$$.fragment,rn),Rn=s(rn),dt=r(rn,"P",{"data-svelte-h":!0}),y(dt)!=="svelte-16q0ax1"&&(dt.textContent=jo),rn.forEach(n),Nn=s(G),q=r(G,"DIV",{class:!0});var dn=J(q);c(Ne.$$.fragment,dn),Fn=s(dn),pt=r(dn,"P",{"data-svelte-h":!0}),y(pt)!=="svelte-1eod455"&&(pt.textContent=Io),dn.forEach(n),G.forEach(n),Ot=s(e),c(Fe.$$.fragment,e),Kt=s(e),Z=r(e,"DIV",{class:!0});var V=J(Z);c(He.$$.fragment,V),Hn=s(V),ct=r(V,"DIV",{class:!0});var ko=J(ct);c($e.$$.fragment,ko),ko.forEach(n),$n=s(V),D=r(V,"DIV",{class:!0});var pn=J(D);c(Ee.$$.fragment,pn),En=s(pn),mt=r(pn,"P",{"data-svelte-h":!0}),y(mt)!=="svelte-1s3c06i"&&(mt.innerHTML=Xo),pn.forEach(n),zn=s(V),O=r(V,"DIV",{class:!0});var cn=J(O);c(ze.$$.fragment,cn),Yn=s(cn),gt=r(cn,"P",{"data-svelte-h":!0}),y(gt)!=="svelte-pkn4ui"&&(gt.innerHTML=Go),cn.forEach(n),Qn=s(V),K=r(V,"DIV",{class:!0});var mn=J(K);c(Ye.$$.fragment,mn),Pn=s(mn),ht=r(mn,"P",{"data-svelte-h":!0}),y(ht)!=="svelte-14bnrb6"&&(ht.textContent=xo),mn.forEach(n),Sn=s(V),ee=r(V,"DIV",{class:!0});var gn=J(ee);c(Qe.$$.fragment,gn),An=s(gn),ut=r(gn,"P",{"data-svelte-h":!0}),y(ut)!=="svelte-1xwrf7t"&&(ut.textContent=Bo),gn.forEach(n),V.forEach(n),en=s(e),c(Pe.$$.fragment,e),tn=s(e),N=r(e,"DIV",{class:!0});var hn=J(N);c(Se.$$.fragment,hn),qn=s(hn),ft=r(hn,"P",{"data-svelte-h":!0}),y(ft)!=="svelte-ia4jjd"&&(ft.textContent=Vo),hn.forEach(n),nn=s(e),c(Ae.$$.fragment,e),on=s(e),Mt=r(e,"P",{}),J(Mt).forEach(n),this.h()},h(){b(f,"name","hf:doc:metadata"),b(f,"content",Po),b(w,"class","flex flex-wrap space-x-1"),b(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ct,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 
rounded-tl-xl mb-6 mt-8"),b(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){i(document.head,f),a(e,j,t),a(e,M,t),a(e,_,t),m(T,e,t),a(e,d,t),a(e,w,t),a(e,Tt,t),a(e,te,t),a(e,bt,t),m($,e,t),a(e,wt,t),a(e,ne,t),a(e,Jt,t),a(e,oe,t),a(e,vt,t),a(e,se,t),a(e,Ut,t),m(ae,e,t),a(e,Zt,t),a(e,ie,t),a(e,jt,t),a(e,le,t),a(e,It,t),m(re,e,t),a(e,Xt,t),a(e,de,t),a(e,Gt,t),m(pe,e,t),a(e,xt,t),m(ce,e,t),a(e,Bt,t),a(e,me,t),a(e,Vt,t),m(ge,e,t),a(e,kt,t),a(e,he,t),a(e,Wt,t),m(ue,e,t),a(e,Lt,t),a(e,fe,t),a(e,Ct,t),m(ye,e,t),a(e,Rt,t),a(e,_e,t),a(e,Nt,t),a(e,Me,t),a(e,Ft,t),m(Te,e,t),a(e,Ht,t),a(e,be,t),a(e,$t,t),m(we,e,t),a(e,Et,t),a(e,Je,t),a(e,zt,t),a(e,ve,t),a(e,Yt,t),m(Ue,e,t),a(e,Qt,t),m(Ze,e,t),a(e,Pt,t),a(e,I,t),m(je,I,null),i(I,fn),i(I,De),i(I,yn),i(I,Oe),i(I,_n),i(I,W),m(Ie,W,null),i(W,Mn),i(W,Ke),i(W,Tn),m(E,W,null),i(I,bn),i(I,z),m(Xe,z,null),i(z,wn),i(z,et),a(e,St,t),m(Ge,e,t),a(e,At,t),a(e,X,t),m(xe,X,null),i(X,Jn),i(X,tt),i(X,vn),i(X,nt),i(X,Un),i(X,L),m(Be,L,null),i(L,Zn),i(L,ot),i(L,jn),m(Y,L,null),i(X,In),i(X,Q),m(Ve,Q,null),i(Q,Xn),i(Q,st),a(e,qt,t),m(ke,e,t),a(e,Dt,t),a(e,v,t),m(We,v,null),i(v,Gn),i(v,at),i(v,xn),i(v,it),i(v,Bn),i(v,C),m(Le,C,null),i(C,Vn),i(C,lt),i(C,kn),m(P,C,null),i(v,Wn),i(v,S),m(Ce,S,null),i(S,Ln),i(S,rt),i(v,Cn),i(v,A),m(Re,A,null),i(A,Rn),i(A,dt),i(v,Nn),i(v,q),m(Ne,q,null),i(q,Fn),i(q,pt),a(e,Ot,t),m(Fe,e,t),a(e,Kt,t),a(e,Z,t),m(He,Z,null),i(Z,Hn),i(Z,ct),m($e,ct,null),i(Z,$n),i(Z,D),m(Ee,D,null),i(D,En),i(D,mt),i(Z,zn),i(Z,O),m(ze,O,null),i(O,Yn),i(O,gt),i(Z,Qn),i(Z,K),m(Ye,K,null),i(K,Pn),i(K,ht),i(Z,Sn),i(Z,ee),m(Qe,ee,null),i(ee,An),i(ee,ut),a(e,en,t),m(Pe,e,t),a(e,tn,t),a(e,N,t),m(Se,N,null),i(N,qn),i(N,ft),a(e,nn,t),m(Ae,e,t),a(e,on,t),a(e,Mt,t),sn=!0},p(e,[t]){const x={};t&2&&(x.$$scope={dirty:t,ctx:e}),$.$set(x);const F={};t&2&&(F.$$scope={dirty:t,ctx:e}),E.$set(F);const qe={};t&2&&(qe.$$scope={dirty:t,ctx:e}),Y.$set(qe);const 
B={};t&2&&(B.$$scope={dirty:t,ctx:e}),P.$set(B)},i(e){sn||(g(T.$$.fragment,e),g($.$$.fragment,e),g(ae.$$.fragment,e),g(re.$$.fragment,e),g(pe.$$.fragment,e),g(ce.$$.fragment,e),g(ge.$$.fragment,e),g(ue.$$.fragment,e),g(ye.$$.fragment,e),g(Te.$$.fragment,e),g(we.$$.fragment,e),g(Ue.$$.fragment,e),g(Ze.$$.fragment,e),g(je.$$.fragment,e),g(Ie.$$.fragment,e),g(E.$$.fragment,e),g(Xe.$$.fragment,e),g(Ge.$$.fragment,e),g(xe.$$.fragment,e),g(Be.$$.fragment,e),g(Y.$$.fragment,e),g(Ve.$$.fragment,e),g(ke.$$.fragment,e),g(We.$$.fragment,e),g(Le.$$.fragment,e),g(P.$$.fragment,e),g(Ce.$$.fragment,e),g(Re.$$.fragment,e),g(Ne.$$.fragment,e),g(Fe.$$.fragment,e),g(He.$$.fragment,e),g($e.$$.fragment,e),g(Ee.$$.fragment,e),g(ze.$$.fragment,e),g(Ye.$$.fragment,e),g(Qe.$$.fragment,e),g(Pe.$$.fragment,e),g(Se.$$.fragment,e),g(Ae.$$.fragment,e),sn=!0)},o(e){h(T.$$.fragment,e),h($.$$.fragment,e),h(ae.$$.fragment,e),h(re.$$.fragment,e),h(pe.$$.fragment,e),h(ce.$$.fragment,e),h(ge.$$.fragment,e),h(ue.$$.fragment,e),h(ye.$$.fragment,e),h(Te.$$.fragment,e),h(we.$$.fragment,e),h(Ue.$$.fragment,e),h(Ze.$$.fragment,e),h(je.$$.fragment,e),h(Ie.$$.fragment,e),h(E.$$.fragment,e),h(Xe.$$.fragment,e),h(Ge.$$.fragment,e),h(xe.$$.fragment,e),h(Be.$$.fragment,e),h(Y.$$.fragment,e),h(Ve.$$.fragment,e),h(ke.$$.fragment,e),h(We.$$.fragment,e),h(Le.$$.fragment,e),h(P.$$.fragment,e),h(Ce.$$.fragment,e),h(Re.$$.fragment,e),h(Ne.$$.fragment,e),h(Fe.$$.fragment,e),h(He.$$.fragment,e),h($e.$$.fragment,e),h(Ee.$$.fragment,e),h(ze.$$.fragment,e),h(Ye.$$.fragment,e),h(Qe.$$.fragment,e),h(Pe.$$.fragment,e),h(Se.$$.fragment,e),h(Ae.$$.fragment,e),sn=!1},d(e){e&&(n(j),n(M),n(_),n(d),n(w),n(Tt),n(te),n(bt),n(wt),n(ne),n(Jt),n(oe),n(vt),n(se),n(Ut),n(Zt),n(ie),n(jt),n(le),n(It),n(Xt),n(de),n(Gt),n(xt),n(Bt),n(me),n(Vt),n(kt),n(he),n(Wt),n(Lt),n(fe),n(Ct),n(Rt),n(_e),n(Nt),n(Me),n(Ft),n(Ht),n(be),n($t),n(Et),n(Je),n(zt),n(ve),n(Yt),n(Qt),n(Pt),n(I),n(St),n(At),n(X),n(qt),n(Dt),n(v),n(Ot),n(Kt),n(Z),n(en),n(tn),n(N),n(nn),n(on),n(Mt)),n(f),u(T,e),u($,e),u(ae,e),u(re,e),u(pe,e),u(ce,e),u(ge,e),u(ue,e),u(ye,e),u(Te,e),u(we,e),u(Ue,e),u(Ze,e),u(je),u(Ie),u(E),u(Xe),u(Ge,e),u(xe),u(Be),u(Y),u(Ve),u(ke,e),u(We),u(Le),u(P),u(Ce),u(Re),u(Ne),u(Fe,e),u(He),u($e),u(Ee),u(ze),u(Ye),u(Qe),u(Pe,e),u(Se),u(Ae,e)}}}const Po='{"title":"LTX Video","local":"ltx-video","sections":[{"title":"Recommended settings for generation","local":"recommended-settings-for-generation","sections":[],"depth":2},{"title":"Using LTX Video 13B 0.9.7","local":"using-ltx-video-13b-097","sections":[],"depth":2},{"title":"Loading Single Files","local":"loading-single-files","sections":[],"depth":2},{"title":"Quantization","local":"quantization","sections":[],"depth":2},{"title":"LTXPipeline","local":"diffusers.LTXPipeline","sections":[],"depth":2},{"title":"LTXImageToVideoPipeline","local":"diffusers.LTXImageToVideoPipeline","sections":[],"depth":2},{"title":"LTXConditionPipeline","local":"diffusers.LTXConditionPipeline","sections":[],"depth":2},{"title":"LTXLatentUpsamplePipeline","local":"diffusers.LTXLatentUpsamplePipeline","sections":[],"depth":2},{"title":"LTXPipelineOutput","local":"diffusers.pipelines.ltx.pipeline_output.LTXPipelineOutput","sections":[],"depth":2}],"depth":1}';function So(k){return Lo(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ns extends Co{constructor(f){super(),Ro(this,f,So,Qo,Wo,{})}}export{ns as component}; | |
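The bundle above documents the LTXLatentUpsamplePipeline (a __call__ taking video or latents, height=512, width=704, decode_timestep, decode_noise_scale, output_type, and return_dict, plus enable/disable VAE slicing and tiling helpers) and the LTXPipelineOutput whose frames field carries the generated video. As a reading aid, here is a minimal, unofficial Python sketch of how those pieces could fit together in a two-stage generate-then-upsample flow. It is not taken from this page: the checkpoint ids ("Lightricks/LTX-Video-0.9.7-dev" and "Lightricks/ltxv-spatial-upscaler-0.9.7"), the chosen resolution and frame count, and the decode_timestep / decode_noise_scale values are assumptions.

import torch
from diffusers import LTXConditionPipeline, LTXLatentUpsamplePipeline
from diffusers.utils import export_to_video

# Assumed checkpoint ids; substitute whichever LTX base / spatial-upscaler weights you use.
pipe = LTXConditionPipeline.from_pretrained(
    "Lightricks/LTX-Video-0.9.7-dev", torch_dtype=torch.bfloat16
)
pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained(
    "Lightricks/ltxv-spatial-upscaler-0.9.7", vae=pipe.vae, torch_dtype=torch.bfloat16
)
pipe.to("cuda")
pipe_upsample.to("cuda")

# Helper documented above on LTXLatentUpsamplePipeline: tile the VAE to lower peak memory while decoding.
pipe_upsample.enable_vae_tiling()

prompt = "A clownfish swimming through a coral reef, natural lighting"

# Stage 1: generate at the base resolution and keep the result as latents
# (LTXPipelineOutput.frames holds the latents when output_type="latent").
latents = pipe(
    prompt=prompt,
    width=704,
    height=480,
    num_frames=121,
    num_inference_steps=30,
    output_type="latent",
).frames

# Stage 2: spatially upsample the latents and decode to PIL frames.
# The decode_timestep / decode_noise_scale values are illustrative, not recommendations.
video = pipe_upsample(
    latents=latents,
    decode_timestep=0.05,
    decode_noise_scale=0.025,
    output_type="pil",
).frames[0]

export_to_video(video, "output_upsampled.mp4", fps=24)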