Examples:

```python
>>> import torch
>>> from diffusers import VisualClozePipeline
>>> from diffusers.utils import load_image

>>> image_paths = [
...     # in-context examples
...     [
...         load_image(
...             "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_mask.jpg"
...         ),
...         load_image(
...             "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_image.jpg"
...         ),
...     ],
...     # query with the target image
...     [
...         load_image(
...             "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_query_mask.jpg"
...         ),
...         None,  # No image needed for the target image
...     ],
... ]
>>> task_prompt = "In each row, a logical task is demonstrated to achieve [IMAGE2] an aesthetically pleasing photograph based on [IMAGE1] sam 2-generated masks with rich color coding."
>>> content_prompt = "Majestic photo of a golden eagle perched on a rocky outcrop in a mountainous landscape. The eagle is positioned in the right foreground, facing left, with its sharp beak and keen eyes prominently visible. Its plumage is a mix of dark brown and golden hues, with intricate feather details. The background features a soft-focus view of snow-capped mountains under a cloudy sky, creating a serene and grandiose atmosphere. The foreground includes rugged rocks and patches of green moss. Photorealistic, medium depth of field, soft natural lighting, cool color palette, high contrast, sharp focus on the eagle, blurred background, tranquil, majestic, wildlife photography."
>>> pipe = VisualClozePipeline.from_pretrained(
...     "VisualCloze/VisualClozePipeline-384", resolution=384, torch_dtype=torch.bfloat16
... )
>>> pipe.to("cuda")

>>> image = pipe(
...     task_prompt=task_prompt,
...     content_prompt=content_prompt,
...     image=image_paths,
...     upsampling_width=1344,
...     upsampling_height=768,
...     upsampling_strength=0.4,
...     guidance_scale=30,
...     num_inference_steps=30,
...     max_sequence_length=512,
...     generator=torch.Generator("cpu").manual_seed(0),
... ).images[0][0]
>>> image.save("visualcloze.png")
```
Examples:

```python
>>> import torch
>>> from diffusers import VisualClozeGenerationPipeline, FluxFillPipeline as VisualClozeUpsamplingPipeline
>>> from diffusers.utils import load_image
>>> from PIL import Image

>>> image_paths = [
...     # in-context examples
...     [
...         load_image(
...             "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_mask.jpg"
...         ),
...         load_image(
...             "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_image.jpg"
...         ),
...     ],
...     # query with the target image
...     [
...         load_image(
...             "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_query_mask.jpg"
...         ),
...         None,  # No image needed for the target image
...     ],
... ]
>>> task_prompt = "In each row, a logical task is demonstrated to achieve [IMAGE2] an aesthetically pleasing photograph based on [IMAGE1] sam 2-generated masks with rich color coding."
>>> content_prompt = "Majestic photo of a golden eagle perched on a rocky outcrop in a mountainous landscape. The eagle is positioned in the right foreground, facing left, with its sharp beak and keen eyes prominently visible. Its plumage is a mix of dark brown and golden hues, with intricate feather details. The background features a soft-focus view of snow-capped mountains under a cloudy sky, creating a serene and grandiose atmosphere. The foreground includes rugged rocks and patches of green moss. Photorealistic, medium depth of field, soft natural lighting, cool color palette, high contrast, sharp focus on the eagle, blurred background, tranquil, majestic, wildlife photography."
>>> pipe = VisualClozeGenerationPipeline.from_pretrained(
...     "VisualCloze/VisualClozePipeline-384", resolution=384, torch_dtype=torch.bfloat16
... )
>>> pipe.to("cuda")

>>> image = pipe(
...     task_prompt=task_prompt,
...     content_prompt=content_prompt,
...     image=image_paths,
...     guidance_scale=30,
...     num_inference_steps=30,
...     max_sequence_length=512,
...     generator=torch.Generator("cpu").manual_seed(0),
... ).images[0][0]

>>> # optional, upsampling the generated image
>>> pipe_upsample = VisualClozeUpsamplingPipeline.from_pipe(pipe)
>>> pipe_upsample.to("cuda")

>>> mask_image = Image.new("RGB", image.size, (255, 255, 255))

>>> image = pipe_upsample(
...     image=image,
...     mask_image=mask_image,
...     prompt=content_prompt,
...     width=1344,
...     height=768,
...     strength=0.4,
...     guidance_scale=30,
...     num_inference_steps=30,
...     max_sequence_length=512,
...     generator=torch.Generator("cpu").manual_seed(0),
... ).images[0]

>>> image.save("visualcloze.png")
```

# VisualCloze

## Overview

[VisualCloze: A Universal Image Generation Framework via Visual In-Context Learning](https://huggingface.co/papers/2504.07960) is an in-context-learning-based universal image generation framework that offers these key capabilities:

- Support for various in-domain tasks
- Generalization to unseen tasks through in-context learning
- Unification of multiple tasks into one step, generating both the target image and intermediate results
- Support for reverse-engineering conditions from target images

The abstract from the paper is:

*Recent progress in diffusion models significantly advances various image generation tasks. However, the current mainstream approach remains focused on building task-specific models, which have limited efficiency when supporting a wide range of different needs. While universal models attempt to address this limitation, they face critical challenges, including generalizable task instruction, appropriate task distributions, and unified architectural design. To tackle these challenges, we propose VisualCloze, a universal image generation framework, which supports a wide range of in-domain tasks, generalization to unseen ones, unseen unification of multiple tasks, and reverse generation. Unlike existing methods that rely on language-based task instruction, leading to task ambiguity and weak generalization, we integrate visual in-context learning, allowing models to identify tasks from visual demonstrations. Meanwhile, the inherent sparsity of visual task distributions hampers the learning of transferable knowledge across tasks. To this end, we introduce Graph200K, a graph-structured dataset that establishes various interrelated tasks, enhancing task density and transferable knowledge. Furthermore, we uncover that our unified image generation formulation shared a consistent objective with image infilling, enabling us to leverage the strong generative priors of pre-trained infilling models without modifying the architectures. The codes, dataset, and models are available at [https://visualcloze.github.io](https://visualcloze.github.io).*

## Inference

### Model loading

VisualCloze is a two-stage cascade pipeline, consisting of `VisualClozeGenerationPipeline` and `VisualClozeUpsamplingPipeline`.

- In `VisualClozeGenerationPipeline`, each image is downsampled before the images are concatenated into a grid layout, avoiding excessively high resolutions. VisualCloze releases two models suitable for diffusers, i.e., [VisualClozePipeline-384](https://huggingface.co/VisualCloze/VisualClozePipeline-384) and [VisualClozePipeline-512](https://huggingface.co/VisualCloze/VisualClozePipeline-512), which downsample images to resolutions of 384 and 512, respectively.
- `VisualClozeUpsamplingPipeline` uses [SDEdit](https://huggingface.co/papers/2108.01073) to enable high-resolution image synthesis.

The `VisualClozePipeline` integrates both stages to support convenient end-to-end sampling, while also allowing users to use each pipeline independently as needed.
### Input Specifications

#### Task and Content Prompts

- Task prompt: required to describe the generation task intention
- Content prompt: optional description or caption of the target image
- When a content prompt is not needed, pass `None`
- For batch inference, pass `List[str | None]`

#### Image Input Format

- Format: `List[List[Image | None]]`
- Structure:
  - All rows except the last represent in-context examples
  - The last row represents the current query (target image set to `None`)
- For batch inference, pass `List[List[List[Image | None]]]`; see the batched-inference sketch below

#### Resolution Control

- Default behavior:
  - Initial generation in the first stage: an area of `pipe.resolution`²
  - Upsampling in the second stage: a 3x factor
- Custom resolution: adjust using the `upsampling_height` and `upsampling_width` parameters
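These shapes come together in a batched call. The following is a minimal sketch of the documented batch format, not a tested recipe; it reuses `pipe`, `task_prompt`, `content_prompt`, and `image_paths` as defined in the mask2image example below:

```python
# Two queries in one call. Per the specification above, prompts become
# List[str | None] and images become List[List[List[Image | None]]].
results = pipe(
    task_prompt=[task_prompt, task_prompt],
    content_prompt=[content_prompt, None],  # None where no caption is needed
    image=[image_paths, image_paths],
    guidance_scale=30,
    num_inference_steps=30,
    max_sequence_length=512,
    generator=torch.Generator("cpu").manual_seed(0),
).images  # nested output: one list of images per query
```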
| <a href="https://github.com/lzyhha/VisualCloze/tree/main" rel="nofollow">https://github.com/lzyhha/VisualCloze/tree/main</a>. This pipeline is designed to generate images based on visual | |
| in-context examples.`,fl,W,ue,bl,_e,Pl="Function invoked when calling the VisualCloze pipeline for generation.",jl,v,dl,he,ml,y,ge,Ul,Ie,ql=`The VisualCloze pipeline for image generation with visual context. Reference: | |
| <a href="https://github.com/lzyhha/VisualCloze/tree/main" rel="nofollow">https://github.com/lzyhha/VisualCloze/tree/main</a> This pipeline is designed to generate images based on visual | |
| in-context examples.`,Zl,B,ye,_l,Ge,Dl="Function invoked when calling the VisualCloze pipeline for generation.",Il,z,Gl,x,Je,Wl,We,Kl=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Bl,Y,Te,Cl,Be,Ol=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Vl,X,we,kl,Ce,ea=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,vl,R,fe,zl,Ve,la=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,xl,ke,be,Ml,je,ul,xe,hl;return w=new U({props:{title:"VisualCloze",local:"visualcloze",headingTag:"h1"}}),F=new U({props:{title:"Overview",local:"overview",headingTag:"h2"}}),H=new U({props:{title:"Inference",local:"inference",headingTag:"h2"}}),Q=new U({props:{title:"Model loading",local:"model-loading",headingTag:"h3"}}),P=new U({props:{title:"Input Specifications",local:"input-specifications",headingTag:"h3"}}),q=new U({props:{title:"Task and Content Prompts",local:"task-and-content-prompts",headingTag:"h4"}}),K=new U({props:{title:"Image Input Format",local:"image-input-format",headingTag:"h4"}}),ee=new U({props:{title:"Resolution Control",local:"resolution-control",headingTag:"h4"}}),ae=new U({props:{title:"Examples",local:"examples",headingTag:"h3"}}),ne=new U({props:{title:"Example for mask2image",local:"example-for-mask2image",headingTag:"h4"}}),te=new ze({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwVmlzdWFsQ2xvemVQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMFZpc3VhbENsb3plUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMlZpc3VhbENsb3plJTJGVmlzdWFsQ2xvemVQaXBlbGluZS0zODQlMjIlMkMlMjByZXNvbHV0aW9uJTNEMzg0JTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBJTIzJTIwTG9hZCUyMGluLWNvbnRleHQlMjBpbWFnZXMlMjAobWFrZSUyMHN1cmUlMjB0aGUlMjBwYXRocyUyMGFyZSUyMGNvcnJlY3QlMjBhbmQlMjBhY2Nlc3NpYmxlKSUwQWltYWdlX3BhdGhzJTIwJTNEJTIwJTVCJTBBJTIwJTIwJTIwJTIwJTIzJTIwaW4tY29udGV4dCUyMGV4YW1wbGVzJTBBJTIwJTIwJTIwJTIwJTVCJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwbG9hZF9pbWFnZSgnaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGdmlzdWFsY2xvemUlMkZ2aXN1YWxjbG96ZV9tYXNrMmltYWdlX2luY29udGV4dC1leGFtcGxlLTFfbWFzay5qcGcnKSUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMGxvYWRfaW1hZ2UoJ2h0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmRpZmZ1c2VycyUyRnZpc3VhbGNsb3plJTJGdmlzdWFsY2xvemVfbWFzazJpbWFnZV9pbmNvbnRleHQtZXhhbXBsZS0xX2ltYWdlLmpwZycpJTJDJTBBJTIwJTIwJTIwJTIwJTVEJTJDJTBBJTIwJTIwJTIwJTIwJTIzJTIwcXVlcnklMjB3aXRoJTIwdGhlJTIwdGFyZ2V0JTIwaW1hZ2UlMEElMjAlMjAlMjAlMjAlNUIlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBsb2FkX2ltYWdlKCdodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZ2aXN1YWxjbG96ZSUyRnZpc3VhbGNsb3plX21hc2syaW1hZ2VfcXVlcnlfbWFzay5qcGcnKSUyQyUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyME5vbmUlMkMlMjAlMjMlMjBObyUyMGltYWdlJTIwbmVlZGVkJTIwZm9yJTIwdGhlJTIwdGFyZ2V0JTIwaW1hZ2UlMEElMjAlMjAlMjAlMjAlNUQlMkMlMEElNUQlMEElMEElMjMlMjBUYXNrJTIwYW5kJTIwY29udGVudCUyMHByb21wdCUwQXRhc2tfcHJvbXB0JTIwJTNEJTIwJTIySW4lMjBlYWNoJTIwcm93JTJDJTIwYSUyMGxvZ2ljYWwlMjB0YXNrJTIwaXMlMjBkZW1vbnN0cmF0ZWQlMjB0byUyMGFjaGlldmUlMjAlNUJJTUFHRTIlNUQlMjBhbiUyMGFlc3RoZXRpY2FsbHklMjBwbGVhc2luZyUyMHBob3RvZ3JhcGglMjBiYXNlZCUyMG9uJTIwJTVCSU1BR0UxJTVEJTIwc2FtJTIwMi1nZW5lcmF0ZWQlMjBtYXNrcyUyMHdpdGglMjByaWNoJTIwY29sb3IlMjBjb2RpbmcuJTIyJTBBY29udGVudF9wcm9tcHQlMjAlM0QlMjAlMjIlMjIlMjJNYWplc3RpYyUyMHBob3RvJTIwb2YlMjBhJTIwZ29sZGVuJTIwZWFnbGUlMjBwZXJjaGVkJTIwb24lMjBhJTIwcm9ja3klMjBvdXRjcm9wJTIwaW4lMjBhJTIwbW91bnRhaW5vdXMlMjBsYW5kc2NhcGUuJTIwJTBBVGhlJTIwZWFnbGUlMjBpcyUyMHBvc2l0aW9uZWQlMjBpbiUyMHRoZSUyMHJpZ2h0JTIwZm9yZWdyb3VuZCUyQyUyMGZhY2luZyUyMGxlZnQlMkMlMjB3aXRoJTIwaXRzJTIwc2hhcnAlMjBiZWFrJTIwYW5kJTIwa2VlbiUyMGV5ZXMlMjBwcm9taW5lbnRseSUyMHZpc2libGUuJTIwJTBBSXRzJTIwcGx1bWFnZSUyMGlzJTIwYSUyMG1peCUyMG9mJTIwZGFy
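The VAE helpers above are toggled on the pipeline object before sampling. A minimal usage sketch, assuming a `pipe` loaded as in the examples below:

```python
# Trade a little speed for memory when decoding large grid layouts.
pipe.enable_vae_slicing()  # decode the batch slice by slice
pipe.enable_vae_tiling()   # decode/encode in tiles for large images

# ... run the pipeline as in the examples below ...

# Restore single-step decoding afterwards if desired.
pipe.disable_vae_slicing()
pipe.disable_vae_tiling()
```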
### Examples

For comprehensive examples covering a wide range of tasks, please refer to the [Online Demo](https://huggingface.co/spaces/VisualCloze/VisualCloze) and the [GitHub Repository](https://github.com/lzyhha/VisualCloze). Below are simple examples for three cases: mask-to-image conversion, edge detection, and subject-driven generation.

#### Example for mask2image

```python
import torch
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> VisualClozePipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| pipe = VisualClozePipeline.from_pretrained(<span class="hljs-string">"VisualCloze/VisualClozePipeline-384"</span>, resolution=<span class="hljs-number">384</span>, torch_dtype=torch.bfloat16) | |
| pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-comment"># Load in-context images (make sure the paths are correct and accessible)</span> | |
| image_paths = [ | |
| <span class="hljs-comment"># in-context examples</span> | |
| [ | |
| load_image(<span class="hljs-string">'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_mask.jpg'</span>), | |
| load_image(<span class="hljs-string">'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_image.jpg'</span>), | |
| ], | |
| <span class="hljs-comment"># query with the target image</span> | |
| [ | |
| load_image(<span class="hljs-string">'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_query_mask.jpg'</span>), | |
| <span class="hljs-literal">None</span>, <span class="hljs-comment"># No image needed for the target image</span> | |
| ], | |
| ] | |
| <span class="hljs-comment"># Task and content prompt</span> | |
| task_prompt = <span class="hljs-string">"In each row, a logical task is demonstrated to achieve [IMAGE2] an aesthetically pleasing photograph based on [IMAGE1] sam 2-generated masks with rich color coding."</span> | |
| content_prompt = <span class="hljs-string">"""Majestic photo of a golden eagle perched on a rocky outcrop in a mountainous landscape. | |
| The eagle is positioned in the right foreground, facing left, with its sharp beak and keen eyes prominently visible. | |
| Its plumage is a mix of dark brown and golden hues, with intricate feather details. | |
| The background features a soft-focus view of snow-capped mountains under a cloudy sky, creating a serene and grandiose atmosphere. | |
| The foreground includes rugged rocks and patches of green moss. Photorealistic, medium depth of field, | |
| soft natural lighting, cool color palette, high contrast, sharp focus on the eagle, blurred background, | |
| tranquil, majestic, wildlife photography."""</span> | |
| <span class="hljs-comment"># Run the pipeline</span> | |
| image_result = pipe( | |
| task_prompt=task_prompt, | |
| content_prompt=content_prompt, | |
| image=image_paths, | |
| upsampling_width=<span class="hljs-number">1344</span>, | |
| upsampling_height=<span class="hljs-number">768</span>, | |
| upsampling_strength=<span class="hljs-number">0.4</span>, | |
| guidance_scale=<span class="hljs-number">30</span>, | |
| num_inference_steps=<span class="hljs-number">30</span>, | |
| max_sequence_length=<span class="hljs-number">512</span>, | |
| generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>) | |
| ).images[<span class="hljs-number">0</span>][<span class="hljs-number">0</span>] | |
| <span class="hljs-comment"># Save the resulting image</span> | |
| image_result.save(<span class="hljs-string">"visualcloze.png"</span>)`,wrap:!1}}),oe=new U({props:{title:"Example for edge-detection",local:"example-for-edge-detection",headingTag:"h4"}}),ie=new ze({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwVmlzdWFsQ2xvemVQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMFZpc3VhbENsb3plUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMlZpc3VhbENsb3plJTJGVmlzdWFsQ2xvemVQaXBlbGluZS0zODQlMjIlMkMlMjByZXNvbHV0aW9uJTNEMzg0JTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBJTIzJTIwTG9hZCUyMGluLWNvbnRleHQlMjBpbWFnZXMlMjAobWFrZSUyMHN1cmUlMjB0aGUlMjBwYXRocyUyMGFyZSUyMGNvcnJlY3QlMjBhbmQlMjBhY2Nlc3NpYmxlKSUwQWltYWdlX3BhdGhzJTIwJTNEJTIwJTVCJTBBJTIwJTIwJTIwJTIwJTIzJTIwaW4tY29udGV4dCUyMGV4YW1wbGVzJTBBJTIwJTIwJTIwJTIwJTVCJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwbG9hZF9pbWFnZSgnaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGdmlzdWFsY2xvemUlMkZ2aXN1YWxjbG96ZV9lZGdlZGV0ZWN0aW9uX2luY29udGV4dC1leGFtcGxlLTFfaW1hZ2UuanBnJyklMkMlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBsb2FkX2ltYWdlKCdodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZ2aXN1YWxjbG96ZSUyRnZpc3VhbGNsb3plX2VkZ2VkZXRlY3Rpb25faW5jb250ZXh0LWV4YW1wbGUtMV9lZGdlLmpwZycpJTJDJTBBJTIwJTIwJTIwJTIwJTVEJTJDJTBBJTIwJTIwJTIwJTIwJTVCJTBBJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwbG9hZF9pbWFnZSgnaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGdmlzdWFsY2xvemUlMkZ2aXN1YWxjbG96ZV9lZGdlZGV0ZWN0aW9uX2luY29udGV4dC1leGFtcGxlLTJfaW1hZ2UuanBnJyklMkMlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBsb2FkX2ltYWdlKCdodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZ2aXN1YWxjbG96ZSUyRnZpc3VhbGNsb3plX2VkZ2VkZXRlY3Rpb25faW5jb250ZXh0LWV4YW1wbGUtMl9lZGdlLmpwZycpJTJDJTBBJTIwJTIwJTIwJTIwJTVEJTJDJTBBJTIwJTIwJTIwJTIwJTIzJTIwcXVlcnklMjB3aXRoJTIwdGhlJTIwdGFyZ2V0JTIwaW1hZ2UlMEElMjAlMjAlMjAlMjAlNUIlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBsb2FkX2ltYWdlKCdodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZ2aXN1YWxjbG96ZSUyRnZpc3VhbGNsb3plX2VkZ2VkZXRlY3Rpb25fcXVlcnlfaW1hZ2UuanBnJyklMkMlMEElMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjBOb25lJTJDJTIwJTIzJTIwTm8lMjBpbWFnZSUyMG5lZWRlZCUyMGZvciUyMHRoZSUyMHRhcmdldCUyMGltYWdlJTBBJTIwJTIwJTIwJTIwJTVEJTJDJTBBJTVEJTBBJTBBJTIzJTIwVGFzayUyMGFuZCUyMGNvbnRlbnQlMjBwcm9tcHQlMEF0YXNrX3Byb21wdCUyMCUzRCUyMCUyMkVhY2glMjByb3clMjBpbGx1c3RyYXRlcyUyMGElMjBwYXRod2F5JTIwZnJvbSUyMCU1QklNQUdFMSU1RCUyMGElMjBzaGFycCUyMGFuZCUyMGJlYXV0aWZ1bGx5JTIwY29tcG9zZWQlMjBwaG90b2dyYXBoJTIwdG8lMjAlNUJJTUFHRTIlNUQlMjBlZGdlJTIwbWFwJTIwd2l0aCUyMG5hdHVyYWwlMjB3ZWxsLWNvbm5lY3RlZCUyMG91dGxpbmVzJTIwdXNpbmclMjBhJTIwY2xlYXIlMjBsb2dpY2FsJTIwdGFzay4lMjIlMEFjb250ZW50X3Byb21wdCUyMCUzRCUyMCUyMiUyMiUwQSUwQSUyMyUyMFJ1biUyMHRoZSUyMHBpcGVsaW5lJTBBaW1hZ2VfcmVzdWx0JTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjB0YXNrX3Byb21wdCUzRHRhc2tfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwY29udGVudF9wcm9tcHQlM0Rjb250ZW50X3Byb21wdCUyQyUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2VfcGF0aHMlMkMlMEElMjAlMjAlMjAlMjB1cHNhbXBsaW5nX3dpZHRoJTNEODY0JTJDJTBBJTIwJTIwJTIwJTIwdXBzYW1wbGluZ19oZWlnaHQlM0QxMTUyJTJDJTBBJTIwJTIwJTIwJTIwdXBzYW1wbGluZ19zdHJlbmd0aCUzRDAuNCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMzAlMkMlM
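The double indexing `images[0][0]` reflects the nested output format: the outer index selects the query in the batch, the inner one the image generated for that query. A small sketch of iterating a batched result, reusing the hypothetical `results` from the batched-inference sketch earlier (file names invented for illustration):

```python
# results.images is nested: one inner list of PIL images per query.
for query_idx, images_for_query in enumerate(results.images):
    for image_idx, img in enumerate(images_for_query):
        img.save(f"visualcloze_q{query_idx}_{image_idx}.png")
```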
#### Example for edge-detection

```python
import torch
from diffusers import VisualClozePipeline
from diffusers.utils import load_image

pipe = VisualClozePipeline.from_pretrained("VisualCloze/VisualClozePipeline-384", resolution=384, torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Load in-context images (make sure the paths are correct and accessible)
image_paths = [
    # in-context examples
    [
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_edgedetection_incontext-example-1_image.jpg'),
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_edgedetection_incontext-example-1_edge.jpg'),
    ],
    [
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_edgedetection_incontext-example-2_image.jpg'),
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_edgedetection_incontext-example-2_edge.jpg'),
    ],
    # query with the target image
    [
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_edgedetection_query_image.jpg'),
        None,  # No image needed for the target image
    ],
]

# Task and content prompt
task_prompt = "Each row illustrates a pathway from [IMAGE1] a sharp and beautifully composed photograph to [IMAGE2] edge map with natural well-connected outlines using a clear logical task."
content_prompt = ""

# Run the pipeline
image_result = pipe(
    task_prompt=task_prompt,
    content_prompt=content_prompt,
    image=image_paths,
    upsampling_width=864,
    upsampling_height=1152,
    upsampling_strength=0.4,
    guidance_scale=30,
    num_inference_steps=30,
    max_sequence_length=512,
    generator=torch.Generator("cpu").manual_seed(0)
).images[0][0]

# Save the resulting image
image_result.save("visualcloze.png")
```
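Here the content prompt is passed as an empty string; per the input specification above, a query without a caption can also pass `None`. A minimal variant, assuming the same `pipe`, `task_prompt`, and `image_paths` as in the edge-detection example:

```python
# Pass None when no caption of the target image is needed.
image_result = pipe(
    task_prompt=task_prompt,
    content_prompt=None,
    image=image_paths,
    upsampling_width=864,
    upsampling_height=1152,
    upsampling_strength=0.4,
    guidance_scale=30,
    num_inference_steps=30,
    max_sequence_length=512,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0][0]
```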
#### Example for subject-driven generation

```python
import torch
from diffusers import VisualClozePipeline
from diffusers.utils import load_image

pipe = VisualClozePipeline.from_pretrained("VisualCloze/VisualClozePipeline-384", resolution=384, torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Load in-context images (make sure the paths are correct and accessible)
image_paths = [
    # in-context examples
    [
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_incontext-example-1_reference.jpg'),
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_incontext-example-1_depth.jpg'),
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_incontext-example-1_image.jpg'),
    ],
    [
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_incontext-example-2_reference.jpg'),
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_incontext-example-2_depth.jpg'),
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_incontext-example-2_image.jpg'),
    ],
    # query with the target image
    [
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_query_reference.jpg'),
        load_image('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_subjectdriven_query_depth.jpg'),
        None,  # No image needed for the target image
    ],
]

# Task and content prompt
task_prompt = """Each row describes a process that begins with [IMAGE1] an image containing the key object,
[IMAGE2] depth map revealing gray-toned spatial layers and results in
[IMAGE3] an image with artistic quality, a high-quality image with exceptional detail."""
content_prompt = """A vintage porcelain collector's item. Beneath a blossoming cherry tree in early spring,
this treasure is photographed up close, with soft pink petals drifting through the air and vibrant blossoms framing the scene."""

# Run the pipeline
image_result = pipe(
    task_prompt=task_prompt,
    content_prompt=content_prompt,
    image=image_paths,
    upsampling_width=1024,
    upsampling_height=1024,
    upsampling_strength=0.2,
    guidance_scale=30,
    num_inference_steps=30,
    max_sequence_length=512,
    generator=torch.Generator("cpu").manual_seed(0)
).images[0][0]

# Save the resulting image
image_result.save("visualcloze.png")
```
uJTIwUGhvdG9yZWFsaXN0aWMlMkMlMjBtZWRpdW0lMjBkZXB0aCUyMG9mJTIwZmllbGQlMkMlMjBzb2Z0JTIwbmF0dXJhbCUyMGxpZ2h0aW5nJTJDJTIwY29vbCUyMGNvbG9yJTIwcGFsZXR0ZSUyQyUyMGhpZ2glMjBjb250cmFzdCUyQyUyMHNoYXJwJTIwZm9jdXMlMjBvbiUyMHRoZSUyMGVhZ2xlJTJDJTIwYmx1cnJlZCUyMGJhY2tncm91bmQlMkMlMjB0cmFucXVpbCUyQyUyMG1hamVzdGljJTJDJTIwd2lsZGxpZmUlMjBwaG90b2dyYXBoeS4lMjIlMEElMEElMjMlMjBTdGFnZSUyMDElM0ElMjBHZW5lcmF0ZSUyMGluaXRpYWwlMjBpbWFnZSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjB0YXNrX3Byb21wdCUzRHRhc2tfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwY29udGVudF9wcm9tcHQlM0Rjb250ZW50X3Byb21wdCUyQyUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2VfcGF0aHMlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDMwJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDMwJTJDJTBBJTIwJTIwJTIwJTIwbWF4X3NlcXVlbmNlX2xlbmd0aCUzRDUxMiUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvciglMjJjcHUlMjIpLm1hbnVhbF9zZWVkKDApJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTVCMCU1RCUwQSUwQSUyMyUyMFN0YWdlJTIwMiUyMChvcHRpb25hbCklM0ElMjBVcHNhbXBsZSUyMHRoZSUyMGdlbmVyYXRlZCUyMGltYWdlJTBBcGlwZV91cHNhbXBsZSUyMCUzRCUyMFZpc3VhbENsb3plVXBzYW1wbGluZ1BpcGVsaW5lLmZyb21fcGlwZShwaXBlKSUwQXBpcGVfdXBzYW1wbGUudG8oJTIyY3VkYSUyMiklMEElMEFtYXNrX2ltYWdlJTIwJTNEJTIwSW1hZ2UubmV3KCUyMlJHQiUyMiUyQyUyMGltYWdlLnNpemUlMkMlMjAoMjU1JTJDJTIwMjU1JTJDJTIwMjU1KSklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGVfdXBzYW1wbGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG1hc2tfaW1hZ2UlM0RtYXNrX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEY29udGVudF9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEzNDQlMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0Q3NjglMkMlMEElMjAlMjAlMjAlMjBzdHJlbmd0aCUzRDAuNCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMzAlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMzAlMkMlMEElMjAlMjAlMjAlMjBtYXhfc2VxdWVuY2VfbGVuZ3RoJTNENTEyJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCUyMmNwdSUyMikubWFudWFsX3NlZWQoMCklMkMlMEEpLmltYWdlcyU1QjAlNUQlMEElMEFpbWFnZS5zYXZlKCUyMnZpc3VhbGNsb3plLnBuZyUyMik=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> VisualClozeGenerationPipeline, FluxFillPipeline <span class="hljs-keyword">as</span> VisualClozeUpsamplingPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| pipe = VisualClozeGenerationPipeline.from_pretrained( | |
| <span class="hljs-string">"VisualCloze/VisualClozePipeline-384"</span>, resolution=<span class="hljs-number">384</span>, torch_dtype=torch.bfloat16 | |
| ) | |
| pipe.to(<span class="hljs-string">"cuda"</span>) | |
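| <span class="hljs-comment"># Each inner list is one row of the in-context grid: example rows first, the query row last</span> | |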
| image_paths = [ | |
| <span class="hljs-comment"># in-context examples</span> | |
| [ | |
| load_image( | |
| <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_mask.jpg"</span> | |
| ), | |
| load_image( | |
| <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_incontext-example-1_image.jpg"</span> | |
| ), | |
| ], | |
| <span class="hljs-comment"># query with the target image</span> | |
| [ | |
| load_image( | |
| <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/visualcloze/visualcloze_mask2image_query_mask.jpg"</span> | |
| ), | |
| <span class="hljs-literal">None</span>, <span class="hljs-comment"># No image needed for the target image</span> | |
| ], | |
| ] | |
| task_prompt = <span class="hljs-string">"In each row, a logical task is demonstrated to achieve [IMAGE2] an aesthetically pleasing photograph based on [IMAGE1] sam 2-generated masks with rich color coding."</span> | |
| content_prompt = <span class="hljs-string">"Majestic photo of a golden eagle perched on a rocky outcrop in a mountainous landscape. The eagle is positioned in the right foreground, facing left, with its sharp beak and keen eyes prominently visible. Its plumage is a mix of dark brown and golden hues, with intricate feather details. The background features a soft-focus view of snow-capped mountains under a cloudy sky, creating a serene and grandiose atmosphere. The foreground includes rugged rocks and patches of green moss. Photorealistic, medium depth of field, soft natural lighting, cool color palette, high contrast, sharp focus on the eagle, blurred background, tranquil, majestic, wildlife photography."</span> | |
| <span class="hljs-comment"># Stage 1: Generate initial image</span> | |
| image = pipe( | |
| task_prompt=task_prompt, | |
| content_prompt=content_prompt, | |
| image=image_paths, | |
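| <span class="hljs-comment"># guidance_scale and max_sequence_length below match the pipeline defaults (30.0 and 512)</span> | |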
| guidance_scale=<span class="hljs-number">30</span>, | |
| num_inference_steps=<span class="hljs-number">30</span>, | |
| max_sequence_length=<span class="hljs-number">512</span>, | |
| generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>), | |
| ).images[<span class="hljs-number">0</span>][<span class="hljs-number">0</span>] | |
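| <span class="hljs-comment"># .images holds one list of results per prompt, so [0][0] selects the first image of the first prompt</span> | |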
| <span class="hljs-comment"># Stage 2 (optional): Upsample the generated image</span> | |
| pipe_upsample = VisualClozeUpsamplingPipeline.from_pipe(pipe) | |
| pipe_upsample.to(<span class="hljs-string">"cuda"</span>) | |
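| <span class="hljs-comment"># A fully white mask asks the fill pipeline to regenerate every pixel during SDEdit-based upsampling</span> | |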
| mask_image = Image.new(<span class="hljs-string">"RGB"</span>, image.size, (<span class="hljs-number">255</span>, <span class="hljs-number">255</span>, <span class="hljs-number">255</span>)) | |
| image = pipe_upsample( | |
| image=image, | |
| mask_image=mask_image, | |
| prompt=content_prompt, | |
| width=<span class="hljs-number">1344</span>, | |
| height=<span class="hljs-number">768</span>, | |
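| <span class="hljs-comment"># strength below 1 keeps the stage-1 image as the starting point; more noise is added as strength rises</span> | |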
| strength=<span class="hljs-number">0.4</span>, | |
| guidance_scale=<span class="hljs-number">30</span>, | |
| num_inference_steps=<span class="hljs-number">30</span>, | |
| max_sequence_length=<span class="hljs-number">512</span>, | |
| generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>), | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"visualcloze.png"</span>)`,wrap:!1}}),me=new U({props:{title:"VisualClozePipeline",local:"diffusers.VisualClozePipeline",headingTag:"h2"}}),Me=new k({props:{name:"class diffusers.VisualClozePipeline",anchor:"diffusers.VisualClozePipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"},{name:"resolution",val:": int = 384"}],parametersDescription:[{anchor:"diffusers.VisualClozePipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12279/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.VisualClozePipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12279/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.VisualClozePipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12279/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.VisualClozePipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.VisualClozePipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.VisualClozePipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.VisualClozePipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"},{anchor:"diffusers.VisualClozePipeline.resolution",description:`<strong>resolution</strong> (<code>int</code>, <em>optional</em>, defaults to 384) — | |
| The resolution of each image when concatenating images from the query and in-context examples.`,name:"resolution"}],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py#L93"}}),ue=new k({props:{name:"__call__",anchor:"diffusers.VisualClozePipeline.__call__",parameters:[{name:"task_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"content_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Optional[torch.FloatTensor] = None"},{name:"upsampling_height",val:": typing.Optional[int] = None"},{name:"upsampling_width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 30.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"},{name:"upsampling_strength",val:": float = 1.0"}],parametersDescription:[{anchor:"diffusers.VisualClozePipeline.__call__.task_prompt",description:`<strong>task_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to define the task intention.`,name:"task_prompt"},{anchor:"diffusers.VisualClozePipeline.__call__.content_prompt",description:`<strong>content_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to define the content or caption of the target image to be generated.`,name:"content_prompt"},{anchor:"diffusers.VisualClozePipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code>. If it is a tensor or a list | |
| of tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code>.`,name:"image"},{anchor:"diffusers.VisualClozePipeline.__call__.upsampling_height",description:`<strong>upsampling_height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image (i.e., output image) after upsampling via SDEdit. By | |
| default, the image is upsampled by a factor of three, and the base resolution is determined by the | |
| resolution parameter of the pipeline. When only one of <code>upsampling_height</code> or <code>upsampling_width</code> is | |
| specified, the other will be automatically set based on the aspect ratio.`,name:"upsampling_height"},{anchor:"diffusers.VisualClozePipeline.__call__.upsampling_width",description:`<strong>upsampling_width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image (i.e., output image) after upsampling via SDEdit. By | |
| default, the image is upsampled by a factor of three, and the base resolution is determined by the | |
| resolution parameter of the pipeline. When only one of <code>upsampling_height</code> or <code>upsampling_width</code> is | |
| specified, the other will be automatically set based on the aspect ratio.`,name:"upsampling_width"},{anchor:"diffusers.VisualClozePipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.VisualClozePipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.VisualClozePipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 30.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> in equation 2 | |
| of the <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. A higher guidance scale encourages the model to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.VisualClozePipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.VisualClozePipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.VisualClozePipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.VisualClozePipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.VisualClozePipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.VisualClozePipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.VisualClozePipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.VisualClozePipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.VisualClozePipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that is called at the end of each denoising step during inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.VisualClozePipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.VisualClozePipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"},{anchor:"diffusers.VisualClozePipeline.__call__.upsampling_strength",description:`<strong>upsampling_strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates the extent to transform the reference <code>image</code> when upsampling the results. Must be between | |
| 0 and 1. The generated image is used as a starting point, and more noise is added the higher the | |
| <code>upsampling_strength</code>. The number of denoising steps depends on the amount of noise initially added. | |
| When <code>upsampling_strength</code> is 1, the added noise is at its maximum and the denoising process runs for the full | |
| number of iterations specified in <code>num_inference_steps</code>. A value of 0 skips the upsampling step and | |
| outputs the results at the resolution of <code>self.resolution</code>.`,name:"upsampling_strength"}],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_combined.py#L253",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),v=new sa({props:{anchor:"diffusers.VisualClozePipeline.__call__.example",$$slots:{default:[da]},$$scope:{ctx:Ue}}}),he=new U({props:{title:"VisualClozeGenerationPipeline",local:"diffusers.VisualClozeGenerationPipeline",headingTag:"h2"}}),ge=new k({props:{name:"class diffusers.VisualClozeGenerationPipeline",anchor:"diffusers.VisualClozeGenerationPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"},{name:"resolution",val:": int = 384"}],parametersDescription:[{anchor:"diffusers.VisualClozeGenerationPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12279/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.VisualClozeGenerationPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12279/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.VisualClozeGenerationPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12279/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.VisualClozeGenerationPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.VisualClozeGenerationPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.VisualClozeGenerationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.VisualClozeGenerationPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"},{anchor:"diffusers.VisualClozeGenerationPipeline.resolution",description:`<strong>resolution</strong> (<code>int</code>, <em>optional</em>, defaults to 384) — | |
| The resolution of each image when concatenating images from the query and in-context examples.`,name:"resolution"}],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py#L118"}}),ye=new k({props:{name:"__call__",anchor:"diffusers.VisualClozeGenerationPipeline.__call__",parameters:[{name:"task_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"content_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Optional[torch.FloatTensor] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 30.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.task_prompt",description:`<strong>task_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to define the task intention.`,name:"task_prompt"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.content_prompt",description:`<strong>content_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to define the content or caption of the target image to be generated.`,name:"content_prompt"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code>. If it is a tensor or a list | |
| of tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code>.`,name:"image"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 30.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> in equation 2 | |
| of the <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. A higher guidance scale encourages the model to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that is called at the end of each denoising step during inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py#L683",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),z=new sa({props:{anchor:"diffusers.VisualClozeGenerationPipeline.__call__.example",$$slots:{default:[ma]},$$scope:{ctx:Ue}}}),Je=new k({props:{name:"disable_vae_slicing",anchor:"diffusers.VisualClozeGenerationPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py#L529"}}),Te=new k({props:{name:"disable_vae_tiling",anchor:"diffusers.VisualClozeGenerationPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py#L544"}}),we=new k({props:{name:"enable_vae_slicing",anchor:"diffusers.VisualClozeGenerationPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py#L522"}}),fe=new k({props:{name:"enable_vae_tiling",anchor:"diffusers.VisualClozeGenerationPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py#L536"}}),be=new k({props:{name:"encode_prompt",anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt",parameters:[{name:"layout_prompt",val:": typing.Union[str, typing.List[str]]"},{name:"task_prompt",val:": typing.Union[str, typing.List[str]]"},{name:"content_prompt",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.layout_prompt",description:`<strong>layout_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to define the number of in-context examples and the number of images involved in | |
| the task.`,name:"layout_prompt"},{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.task_prompt",description:`<strong>task_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to define the task intention.`,name:"task_prompt"},{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.content_prompt",description:`<strong>content_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to define the content or caption of the target image to be generated.`,name:"content_prompt"},{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.VisualClozeGenerationPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_12279/src/diffusers/pipelines/visualcloze/pipeline_visualcloze_generation.py#L287"}}),je=new ca({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/visualcloze.md"}}),{c(){g=p("meta"),G=s(),b=p("p"),T=s(),r(w.$$.fragment),o=s(),j=p("p"),j.innerHTML=Yl,Ye=s(),N=p("ol"),N.innerHTML=Xl,Xe=s(),r(F.$$.fragment),Re=s(),A=p("p"),A.textContent=Rl,Ne=s(),E=p("p"),E.innerHTML=Nl,Fe=s(),r(H.$$.fragment),Ae=s(),r(Q.$$.fragment),Ee=s(),S=p("p"),S.innerHTML=Fl,He=s(),$=p("ul"),$.innerHTML=Al,Qe=s(),L=p("p"),L.innerHTML=El,Se=s(),r(P.$$.fragment),$e=s(),r(q.$$.fragment),Le=s(),D=p("ul"),D.innerHTML=Hl,Pe=s(),r(K.$$.fragment),qe=s(),O=p("ul"),O.innerHTML=Ql,De=s(),r(ee.$$.fragment),Ke=s(),le=p("ul"),le.innerHTML=Sl,Oe=s(),r(ae.$$.fragment),el=s(),se=p("p"),se.innerHTML=$l,ll=s(),r(ne.$$.fragment),al=s(),r(te.$$.fragment),sl=s(),r(oe.$$.fragment),nl=s(),r(ie.$$.fragment),tl=s(),r(pe.$$.fragment),ol=s(),r(re.$$.fragment),il=s(),r(ce.$$.fragment),pl=s(),r(de.$$.fragment),rl=s(),r(me.$$.fragment),cl=s(),Z=p("div"),r(Me.$$.fragment),wl=s(),Ze=p("p"),Ze.innerHTML=Ll,fl=s(),W=p("div"),r(ue.$$.fragment),bl=s(),_e=p("p"),_e.textContent=Pl,jl=s(),r(v.$$.fragment),dl=s(),r(he.$$.fragment),ml=s(),y=p("div"),r(ge.$$.fragment),Ul=s(),Ie=p("p"),Ie.innerHTML=ql,Zl=s(),B=p("div"),r(ye.$$.fragment),_l=s(),Ge=p("p"),Ge.textContent=Dl,Il=s(),r(z.$$.fragment),Gl=s(),x=p("div"),r(Je.$$.fragment),Wl=s(),We=p("p"),We.innerHTML=Kl,Bl=s(),Y=p("div"),r(Te.$$.fragment),Cl=s(),Be=p("p"),Be.innerHTML=Ol,Vl=s(),X=p("div"),r(we.$$.fragment),kl=s(),Ce=p("p"),Ce.textContent=ea,vl=s(),R=p("div"),r(fe.$$.fragment),zl=s(),Ve=p("p"),Ve.textContent=la,xl=s(),ke=p("div"),r(be.$$.fragment),Ml=s(),r(je.$$.fragment),ul=s(),xe=p("p"),this.h()},l(e){const l=ra("svelte-u9bgzb",document.head);g=c(l,"META",{name:!0,content:!0}),l.forEach(a),G=n(e),b=c(e,"P",{}),_(b).forEach(a),T=n(e),d(w.$$.fragment,e),o=n(e),j=c(e,"P",{"data-svelte-h":!0}),J(j)!=="svelte-12h8xk0"&&(j.innerHTML=Yl),Ye=n(e),N=c(e,"OL",{"data-svelte-h":!0}),J(N)!=="svelte-14ytbhu"&&(N.innerHTML=Xl),Xe=n(e),d(F.$$.fragment,e),Re=n(e),A=c(e,"P",{"data-svelte-h":!0}),J(A)!=="svelte-1cwsb16"&&(A.textContent=Rl),Ne=n(e),E=c(e,"P",{"data-svelte-h":!0}),J(E)!=="svelte-5csvlm"&&(E.innerHTML=Nl),Fe=n(e),d(H.$$.fragment,e),Ae=n(e),d(Q.$$.fragment,e),Ee=n(e),S=c(e,"P",{"data-svelte-h":!0}),J(S)!=="svelte-16qb420"&&(S.innerHTML=Fl),He=n(e),$=c(e,"UL",{"data-svelte-h":!0}),J($)!=="svelte-1wbjiaa"&&($.innerHTML=Al),Qe=n(e),L=c(e,"P",{"data-svelte-h":!0}),J(L)!=="svelte-4c50zb"&&(L.innerHTML=El),Se=n(e),d(P.$$.fragment,e),$e=n(e),d(q.$$.fragment,e),Le=n(e),D=c(e,"UL",{"data-svelte-h":!0}),J(D)!=="svelte-igjn4e"&&(D.innerHTML=Hl),Pe=n(e),d(K.$$.fragment,e),qe=n(e),O=c(e,"UL",{"data-svelte-h":!0}),J(O)!=="svelte-jn993z"&&(O.innerHTML=Ql),De=n(e),d(ee.$$.fragment,e),Ke=n(e),le=c(e,"UL",{"data-svelte-h":!0}),J(le)!=="svelte-sy4cl9"&&(le.innerHTML=Sl),Oe=n(e),d(ae.$$.fragment,e),el=n(e),se=c(e,"P",{"data-svelte-h":!0}),J(se)!=="svelte-9tmo9c"&&(se.innerHTML=$l),ll=n(e),d(ne.$$.fragment,e),al=n(e),d(te.$$.fragment,e),sl=n(e),d(oe.$$.fragment,e),nl=n(e),d(ie.$$.fragment,e),tl=n(e),d(pe.$$.fragment,e),ol=n(e),d(re.$$.fragment,e),il=n(e),d(ce.$$.fragment,e),pl=n(e),d(de.$$.fragment,e),rl=n(e),d(me.$$.fragment,e),cl=n(e),Z=c(e,"DIV",{class:!0});var 
C=_(Z);d(Me.$$.fragment,C),wl=n(C),Ze=c(C,"P",{"data-svelte-h":!0}),J(Ze)!=="svelte-s68q88"&&(Ze.innerHTML=Ll),fl=n(C),W=c(C,"DIV",{class:!0});var V=_(W);d(ue.$$.fragment,V),bl=n(V),_e=c(V,"P",{"data-svelte-h":!0}),J(_e)!=="svelte-1iwtzit"&&(_e.textContent=Pl),jl=n(V),d(v.$$.fragment,V),V.forEach(a),C.forEach(a),dl=n(e),d(he.$$.fragment,e),ml=n(e),y=c(e,"DIV",{class:!0});var f=_(y);d(ge.$$.fragment,f),Ul=n(f),Ie=c(f,"P",{"data-svelte-h":!0}),J(Ie)!=="svelte-145x6hk"&&(Ie.innerHTML=ql),Zl=n(f),B=c(f,"DIV",{class:!0});var ve=_(B);d(ye.$$.fragment,ve),_l=n(ve),Ge=c(ve,"P",{"data-svelte-h":!0}),J(Ge)!=="svelte-1iwtzit"&&(Ge.textContent=Dl),Il=n(ve),d(z.$$.fragment,ve),ve.forEach(a),Gl=n(f),x=c(f,"DIV",{class:!0});var gl=_(x);d(Je.$$.fragment,gl),Wl=n(gl),We=c(gl,"P",{"data-svelte-h":!0}),J(We)!=="svelte-1s3c06i"&&(We.innerHTML=Kl),gl.forEach(a),Bl=n(f),Y=c(f,"DIV",{class:!0});var yl=_(Y);d(Te.$$.fragment,yl),Cl=n(yl),Be=c(yl,"P",{"data-svelte-h":!0}),J(Be)!=="svelte-pkn4ui"&&(Be.innerHTML=Ol),yl.forEach(a),Vl=n(f),X=c(f,"DIV",{class:!0});var Jl=_(X);d(we.$$.fragment,Jl),kl=n(Jl),Ce=c(Jl,"P",{"data-svelte-h":!0}),J(Ce)!=="svelte-14bnrb6"&&(Ce.textContent=ea),Jl.forEach(a),vl=n(f),R=c(f,"DIV",{class:!0});var Tl=_(R);d(fe.$$.fragment,Tl),zl=n(Tl),Ve=c(Tl,"P",{"data-svelte-h":!0}),J(Ve)!=="svelte-1xwrf7t"&&(Ve.textContent=la),Tl.forEach(a),xl=n(f),ke=c(f,"DIV",{class:!0});var aa=_(ke);d(be.$$.fragment,aa),aa.forEach(a),f.forEach(a),Ml=n(e),d(je.$$.fragment,e),ul=n(e),xe=c(e,"P",{}),_(xe).forEach(a),this.h()},h(){I(g,"name","hf:doc:metadata"),I(g,"content",ua),I(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),I(y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,l){i(document.head,g),t(e,G,l),t(e,b,l),t(e,T,l),m(w,e,l),t(e,o,l),t(e,j,l),t(e,Ye,l),t(e,N,l),t(e,Xe,l),m(F,e,l),t(e,Re,l),t(e,A,l),t(e,Ne,l),t(e,E,l),t(e,Fe,l),m(H,e,l),t(e,Ae,l),m(Q,e,l),t(e,Ee,l),t(e,S,l),t(e,He,l),t(e,$,l),t(e,Qe,l),t(e,L,l),t(e,Se,l),m(P,e,l),t(e,$e,l),m(q,e,l),t(e,Le,l),t(e,D,l),t(e,Pe,l),m(K,e,l),t(e,qe,l),t(e,O,l),t(e,De,l),m(ee,e,l),t(e,Ke,l),t(e,le,l),t(e,Oe,l),m(ae,e,l),t(e,el,l),t(e,se,l),t(e,ll,l),m(ne,e,l),t(e,al,l),m(te,e,l),t(e,sl,l),m(oe,e,l),t(e,nl,l),m(ie,e,l),t(e,tl,l),m(pe,e,l),t(e,ol,l),m(re,e,l),t(e,il,l),m(ce,e,l),t(e,pl,l),m(de,e,l),t(e,rl,l),m(me,e,l),t(e,cl,l),t(e,Z,l),m(Me,Z,null),i(Z,wl),i(Z,Ze),i(Z,fl),i(Z,W),m(ue,W,null),i(W,bl),i(W,_e),i(W,jl),m(v,W,null),t(e,dl,l),m(he,e,l),t(e,ml,l),t(e,y,l),m(ge,y,null),i(y,Ul),i(y,Ie),i(y,Zl),i(y,B),m(ye,B,null),i(B,_l),i(B,Ge),i(B,Il),m(z,B,null),i(y,Gl),i(y,x),m(Je,x,null),i(x,Wl),i(x,We),i(y,Bl),i(y,Y),m(Te,Y,null),i(Y,Cl),i(Y,Be),i(y,Vl),i(y,X),m(we,X,null),i(X,kl),i(X,Ce),i(y,vl),i(y,R),m(fe,R,null),i(R,zl),i(R,Ve),i(y,xl),i(y,ke),m(be,ke,null),t(e,Ml,l),m(je,e,l),t(e,ul,l),t(e,xe,l),hl=!0},p(e,[l]){const C={};l&2&&(C.$$scope={dirty:l,ctx:e}),v.$set(C);const V={};l&2&&(V.$$scope={dirty:l,ctx:e}),z.$set(V)},i(e){hl||(M(w.$$.fragment,e),M(F.$$.fragment,e),M(H.$$.fragment,e),M(Q.$$.fragment,e),M(P.$$.fragment,e),M(q.$$.fragment,e),M(K.$$.fragment,e),M(ee.$$.fragment,e),M(ae.$$.fragment,e),M(ne.$$.fragment,e),M(te.$$.fragment,e),M(oe.$$.fragment,e),M(ie.$$.fragment,e),M(pe.$$.fragment,e),M(re.$$.fragment,e),M(ce.$$.fragment,e),M(de.$$.fragment,e),M(me.$$.fragment,e),M(Me.$$.fragment,e),M(ue.$$.fragment,e),M(v.$$.fragment,e),M(he.$$.fragment,e),M(ge.$$.fragment,e),M(ye.$$.fragment,e),M(z.$$.fragment,e),M(Je.$$.fragment,e),M(Te.$$.fragment,e),M(we.$$.fragment,e),M(fe.$$.fragment,e),M(be.$$.fragment,e),M(je.$$.fragment,e),hl=!0)},o(e){u(w.$$.fragment,e),u(F.$$.fragment,e),u(H.$$.fragment,e),u(Q.$$.fragment,e),u(P.$$.fragment,e),u(q.$$.fragment,e),u(K.$$.fragment,e),u(ee.$$.fragment,e),u(ae.$$.fragment,e),u(ne.$$.fragment,e),u(te.$$.fragment,e),u(oe.$$.fragment,e),u(ie.$$.fragment,e),u(pe.$$.fragment,e),u(re.$$.fragment,e),u(ce.$$.fragment,e),u(de.$$.fragment,e),u(me.$$.fragment,e),u(Me.$$.fragment,e),u(ue.$$.fragment,e),u(v.$$.fragment,e),u(he.$$.fragment,e),u(ge.$$.fragment,e),u(ye.$$.fragment,e),u(z.$$.fragment,e),u(Je.$$.fragment,e),u(Te.$$.fragment,e),u(we.$$.fragment,e),u(fe.$$.fragment,e),u(be.$$.fragment,e),u(je.$$.fragment,e),hl=!1},d(e){e&&(a(G),a(b),a(T),a(o),a(j),a(Ye),a(N),a(Xe),a(Re),a(A),a(Ne),a(E),a(Fe),a(Ae),a(Ee),a(S),a(He),a($),a(Qe),a(L),a(Se),a($e),a(Le),a(D),a(Pe),a(qe),a(O),a(De),a(Ke),a(le),a(Oe),a(el),a(se),a(ll),a(al),a(sl),a(nl),a(tl),a(ol),a(il),a(pl),a(rl),a(cl),a(Z),a(dl),a(ml),a(y),a(Ml),a(ul),a(xe)),a(g),h(w,e),h(F,e),h(H,e),h(Q,e),h(P,e),h(q,e),h(K,e),h(ee,e),h(ae,e),h(ne,e),h(te,e),h(oe,e),h(ie,e),h(pe,e),h(re,e),h(ce,e),h(de,e),h(me,e),h(Me),h(ue),h(v),h(he,e),h(ge),h(ye),h(z),h(Je),h(Te),h(we),h(fe),h(be),h(je,e)}}}const ua='{"title":"VisualCloze","local":"visualcloze","sections":[{"title":"Overview","local":"overview","sections":[],"depth":2},{"title":"Inference","local":"inference","sections":[{"title":"Model loading","local":"model-loading","sections":[],"depth":3},{"title":"Input Specifications","local":"input-specifications","sections":[{"title":"Task and Content Prompts","local":"task-and-content-prompts","sections":[],"depth":4},{"title":"Image Input Format","local":"image-input-format","sections":[],"depth":4},{"title":"Resolution 
Control","local":"resolution-control","sections":[],"depth":4}],"depth":3},{"title":"Examples","local":"examples","sections":[{"title":"Example for mask2image","local":"example-for-mask2image","sections":[],"depth":4},{"title":"Example for edge-detection","local":"example-for-edge-detection","sections":[],"depth":4},{"title":"Example for subject-driven generation","local":"example-for-subject-driven-generation","sections":[],"depth":4},{"title":"Utilize each pipeline independently","local":"utilize-each-pipeline-independently","sections":[],"depth":4}],"depth":3}],"depth":2},{"title":"VisualClozePipeline","local":"diffusers.VisualClozePipeline","sections":[],"depth":2},{"title":"VisualClozeGenerationPipeline","local":"diffusers.VisualClozeGenerationPipeline","sections":[],"depth":2}],"depth":1}';function ha(Ue){return oa(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ba extends ia{constructor(g){super(),pa(this,g,ha,Ma,ta,{})}}export{ba as component}; | |