# CogView4

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>
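As a concrete illustration of the component-reuse pattern the tip points to, here is a minimal sketch that shares already-loaded components with a second pipeline instance. Passing instantiated components as keyword overrides to `from_pretrained` is the standard diffusers pattern; reloading the same checkpoint twice here is purely illustrative.

```python
import torch
from diffusers import CogView4Pipeline

# Load the pipeline once; its components (text encoder, VAE, transformer, ...)
# live on this instance.
pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)

# Reuse the heavy components instead of loading them from disk a second time.
pipe_2 = CogView4Pipeline.from_pretrained(
    "THUDM/CogView4-6B",
    text_encoder=pipe.text_encoder,
    vae=pipe.vae,
    transformer=pipe.transformer,
    torch_dtype=torch.bfloat16,
)
```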
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> CogView4Pipeline
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = CogView4Pipeline.from_pretrained(<span class="hljs-string">&quot;THUDM/CogView4-6B&quot;</span>, torch_dtype=torch.bfloat16)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;A photo of an astronaut riding a horse on mars&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt).images[<span class="hljs-number">0</span>]
<span class="hljs-meta">&gt;&gt;&gt; </span>image.save(<span class="hljs-string">&quot;output.png&quot;</span>)`,wrap:!1}}),{c(){o=c("p"),o.textContent=C,p=s(),h(m.$$.fragment)},l(i){o=g(i,"P",{"data-svelte-h":!0}),M(o)!=="svelte-kvfsh7"&&(o.textContent=C),p=r(i),w(m.$$.fragment,i)},m(i,_){a(i,o,_),a(i,p,_),b(m,i,_),u=!0},p:be,i(i){u||(v(m.$$.fragment,i),u=!0)},o(i){y(m.$$.fragment,i),u=!1},d(i){i&&(n(o),n(p)),x(m,i)}}}function Le(j){let o,C,p,m,u,i,_,A,I,me='This pipeline was contributed by <a href="https://github.com/zRzRzRzRzRzRzR" rel="nofollow">zRzRzRzRzRzRzR</a>. The original codebase can be found <a href="https://huggingface.co/THUDM" rel="nofollow">here</a>. The original weights can be found under <a href="https://huggingface.co/THUDM" rel="nofollow">hf.co/THUDM</a>.',B,L,W,d,z,ie,O,fe="Pipeline for text-to-image generation using CogView4.",se,E,ue=`This model inherits from <a href="/docs/diffusers/pr_12229/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a>. Check the superclass documentation for the generic methods the
**Parameters:**

- **vae** ([AutoencoderKL](https://huggingface.co/docs/diffusers/api/models/autoencoderkl#diffusers.AutoencoderKL)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`GlmModel`) — Frozen text encoder. CogView4 uses [glm-4-9b-hf](https://huggingface.co/THUDM/glm-4-9b-hf).
- **tokenizer** (`PreTrainedTokenizer`) — Tokenizer of class [PreTrainedTokenizer](https://huggingface.co/docs/transformers/main/en/main_classes/tokenizer#transformers.PreTrainedTokenizer).
- **transformer** ([CogView4Transformer2DModel](https://huggingface.co/docs/diffusers/api/models/cogview4_transformer2d#diffusers.CogView4Transformer2DModel)) — A text-conditioned `CogView4Transformer2DModel` to denoise the encoded image latents.
- **scheduler** ([SchedulerMixin](https://huggingface.co/docs/diffusers/api/schedulers/overview#diffusers.SchedulerMixin)) — A scheduler to be used in combination with `transformer` to denoise the encoded image latents.

Pipeline for text-to-image generation using CogView4.

This model inherits from [DiffusionPipeline](https://huggingface.co/docs/diffusers/api/pipelines/overview#diffusers.DiffusionPipeline). Check the superclass documentation for the generic methods the library implements for all pipelines (such as downloading or saving, or running on a particular device).
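Because `CogView4Pipeline` inherits the generic `DiffusionPipeline` machinery, the usual device-placement and offloading helpers apply. A minimal sketch; `enable_model_cpu_offload()` is the inherited `DiffusionPipeline` helper, not anything CogView4-specific:

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)

# Either move the whole pipeline to the GPU at once...
pipe.to("cuda")

# ...or, on smaller GPUs, let diffusers shuttle submodules between CPU and GPU
# on demand. Use one or the other, not both:
# pipe.enable_model_cpu_offload()
```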
### __call__

`__call__(prompt: Optional[Union[str, List[str]]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, guidance_scale: float = 5.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), output_type: str = 'pil', return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[Union[Callable, PipelineCallback, MultiPipelineCallbacks]] = None, callback_on_step_end_tensor_inputs: List[str] = ['latents'], max_sequence_length: int = 1024)`

Function invoked when calling the pipeline for generation.

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide the image generation. If not defined, `prompt_embeds` must be passed instead.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, `negative_prompt_embeds` must be passed instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **height** (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`) — The height in pixels of the generated image. If not provided, it is set to 1024.
- **width** (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`) — The width in pixels of the generated image. If not provided, it is set to 1024.
- **num_inference_steps** (`int`, *optional*, defaults to `50`) — The number of denoising steps. More denoising steps usually lead to a higher-quality image at the expense of slower inference.
- **timesteps** (`List[int]`, *optional*) — Custom timesteps to use for the denoising process, for schedulers that support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed is used. Must be in descending order.
- **sigmas** (`List[float]`, *optional*) — Custom sigmas to use for the denoising process, for schedulers that support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed is used.
- **guidance_scale** (`float`, *optional*, defaults to `5.0`) — Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598); it corresponds to `w` in equation 2 of the [Imagen paper](https://huggingface.co/papers/2205.11487). Guidance is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
- **num_images_per_prompt** (`int`, *optional*, defaults to `1`) — The number of images to generate per prompt.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.FloatTensor`, *optional*) — Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling with the supplied random `generator`.
- **prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. for prompt weighting. If not provided, text embeddings will be generated from the `prompt` argument.
- **negative_prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, e.g. for prompt weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` argument.
- **original_size** (`Tuple[int]`, *optional*, defaults to `(1024, 1024)`) — If `original_size` is not the same as `target_size`, the image will appear down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning, as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
- **crops_coords_top_left** (`Tuple[int]`, *optional*, defaults to `(0, 0)`) — Can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually obtained by setting `crops_coords_top_left` to `(0, 0)`. Part of SDXL's micro-conditioning, as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `CogView4PipelineOutput` instead of a plain tuple.
- **attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **callback_on_step_end** (`Callable`, *optional*) — A function called at the end of each denoising step during inference, with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include all tensors specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*) — The list of tensor inputs for the `callback_on_step_end` function. The tensors in this list are passed as the `callback_kwargs` argument. Only variables listed in the `._callback_tensor_inputs` attribute of your pipeline class may be included.
- **max_sequence_length** (`int`, defaults to `1024`) — Maximum sequence length of the encoded prompt. Can be set to other values, but may lead to poorer results.
**Returns:** `~pipelines.cogview4.pipeline_CogView4.CogView4PipelineOutput` or `tuple` — a `CogView4PipelineOutput` if `return_dict` is `True`, otherwise a `tuple` whose first element is a list with the generated images.

**Examples:**

```python
>>> import torch
>>> from diffusers import CogView4Pipeline

>>> pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")

>>> prompt = "A photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
>>> image.save("output.png")
```
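To make the callback contract above concrete, here is a minimal sketch of a step-end callback. The logging it performs is illustrative, and it follows the general diffusers convention that the callback returns the (possibly modified) `callback_kwargs` dict:

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")

def log_latents(pipeline, step, timestep, callback_kwargs):
    # "latents" is available because it is listed in callback_on_step_end_tensor_inputs.
    latents = callback_kwargs["latents"]
    print(f"step {step:3d} | timestep {timestep} | latents std {latents.std().item():.4f}")
    # By diffusers convention, the callback returns the dict of tensors.
    return callback_kwargs

image = pipe(
    "A photo of an astronaut riding a horse on mars",
    generator=torch.Generator("cuda").manual_seed(0),  # deterministic sampling
    callback_on_step_end=log_latents,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```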
### encode_prompt

`encode_prompt(prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_images_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, max_sequence_length: int = 1024)`

Encodes the prompt into text encoder hidden states.

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — The prompt to be encoded.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, `negative_prompt_embeds` must be passed instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **do_classifier_free_guidance** (`bool`, *optional*, defaults to `True`) — Whether to use classifier-free guidance.
- **num_images_per_prompt** (`int`, *optional*, defaults to `1`) — The number of images that should be generated per prompt.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. for prompt weighting. If not provided, text embeddings will be generated from the `prompt` argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, e.g. for prompt weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` argument.
- **device** (`torch.device`, *optional*) — The torch device to place the resulting embeddings on.
- **dtype** (`torch.dtype`, *optional*) — The torch dtype of the resulting embeddings.
- **max_sequence_length** (`int`, defaults to `1024`) — Maximum sequence length of the encoded prompt. Can be set to other values, but may lead to poorer results.
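One reason to call `encode_prompt` directly is to compute embeddings once and reuse them across several generations. A minimal sketch, assuming `encode_prompt` returns the positive and negative embeddings as a tuple (the common diffusers convention; check the return value in your installed version):

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Encode once; reuse for multiple samplings. The tuple unpacking below is an
# assumption based on the usual diffusers encode_prompt convention.
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    prompt="A photo of an astronaut riding a horse on mars",
    negative_prompt="blurry, low quality",
    device=pipe.device,
)

for seed in (0, 1, 2):
    image = pipe(
        prompt_embeds=prompt_embeds,  # embeddings replace the raw prompt
        negative_prompt_embeds=negative_prompt_embeds,
        generator=torch.Generator("cuda").manual_seed(seed),
    ).images[0]
    image.save(f"astronaut_{seed}.png")
```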
## CogView4PipelineOutput

`class diffusers.pipelines.cogview4.pipeline_output.CogView4PipelineOutput(images: Union[List[PIL.Image.Image], np.ndarray])`

Output class for CogView4 pipelines.

**Parameters:**

- **images** (`List[PIL.Image.Image]` or `np.ndarray`) — List of denoised PIL images of length `batch_size`, or a numpy array of shape `(batch_size, height, width, num_channels)`, containing the denoised images produced by the diffusion pipeline.
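Since the tuple form documented above puts the image list in its first element, the two return styles can be handled as in this small sketch (file names are illustrative):

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")

prompt = "A photo of an astronaut riding a horse on mars"

# Default: a CogView4PipelineOutput with an `.images` field.
output = pipe(prompt)
output.images[0].save("astronaut.png")

# With return_dict=False: a plain tuple whose first element is the image list.
images = pipe(prompt, return_dict=False)[0]
images[0].save("astronaut_tuple.png")
```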
