# CogView4

> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

This pipeline was contributed by [zRzRzRzRzRzRzR](https://github.com/zRzRzRzRzRzRzR). The original codebase can be found [here](https://huggingface.co/THUDM). The original weights can be found under [hf.co/THUDM](https://huggingface.co/THUDM).

Example:

```python
>>> import torch
>>> from diffusers import CogView4Pipeline

>>> pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")

>>> prompt = "A photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
>>> image.save("output.png")
```
## CogView4Pipeline

**class diffusers.CogView4Pipeline**(tokenizer: AutoTokenizer, text_encoder: GlmModel, vae: AutoencoderKL, transformer: CogView4Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler)

Pipeline for text-to-image generation using CogView4.

This model inherits from [DiffusionPipeline](https://huggingface.co/docs/diffusers/api/pipelines/overview#diffusers.DiffusionPipeline). Check the superclass documentation for the generic methods the library implements for all pipelines (such as downloading or saving, or running on a particular device).

Parameters:

- **vae** ([AutoencoderKL](https://huggingface.co/docs/diffusers/api/models/autoencoderkl#diffusers.AutoencoderKL)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`GlmModel`) — Frozen text encoder. CogView4 uses [glm-4-9b-hf](https://huggingface.co/THUDM/glm-4-9b-hf).
- **tokenizer** (`PreTrainedTokenizer`) — Tokenizer of class [PreTrainedTokenizer](https://huggingface.co/docs/transformers/main/en/main_classes/tokenizer#transformers.PreTrainedTokenizer).
- **transformer** ([CogView4Transformer2DModel](https://huggingface.co/docs/diffusers/api/models/cogview4_transformer2d#diffusers.CogView4Transformer2DModel)) — A text-conditioned `CogView4Transformer2DModel` to denoise the encoded image latents.
- **scheduler** ([SchedulerMixin](https://huggingface.co/docs/diffusers/api/schedulers/overview#diffusers.SchedulerMixin)) — A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
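For illustration, the pipeline can also be assembled from individually loaded components; a hypothetical sketch, where the `subfolder` names assume the standard diffusers repository layout rather than anything confirmed by this page:

```python
import torch
from diffusers import (
    AutoencoderKL,
    CogView4Pipeline,
    CogView4Transformer2DModel,
    FlowMatchEulerDiscreteScheduler,
)
from transformers import AutoTokenizer, GlmModel

repo = "THUDM/CogView4-6B"
# Subfolder names below are assumptions based on the usual diffusers layout.
tokenizer = AutoTokenizer.from_pretrained(repo, subfolder="tokenizer")
text_encoder = GlmModel.from_pretrained(repo, subfolder="text_encoder", torch_dtype=torch.bfloat16)
vae = AutoencoderKL.from_pretrained(repo, subfolder="vae", torch_dtype=torch.bfloat16)
transformer = CogView4Transformer2DModel.from_pretrained(repo, subfolder="transformer", torch_dtype=torch.bfloat16)
scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(repo, subfolder="scheduler")

# Keyword arguments match the documented constructor signature.
pipe = CogView4Pipeline(
    tokenizer=tokenizer,
    text_encoder=text_encoder,
    vae=vae,
    transformer=transformer,
    scheduler=scheduler,
)
```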
### __call__

**__call__**(prompt=None, negative_prompt=None, height=None, width=None, num_inference_steps=50, timesteps=None, sigmas=None, guidance_scale=5.0, num_images_per_prompt=1, generator=None, latents=None, prompt_embeds=None, negative_prompt_embeds=None, original_size=None, crops_coords_top_left=(0, 0), output_type="pil", return_dict=True, attention_kwargs=None, callback_on_step_end=None, callback_on_step_end_tensor_inputs=["latents"], max_sequence_length=1024)

Function invoked when calling the pipeline for generation.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide the image generation. If not defined, `prompt_embeds` must be passed instead.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, `negative_prompt_embeds` must be passed instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **height** (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`) — The height in pixels of the generated image. If not provided, it is set to 1024.
- **width** (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`) — The width in pixels of the generated image. If not provided, it is set to 1024.
- **num_inference_steps** (`int`, *optional*, defaults to `50`) — The number of denoising steps. More denoising steps usually lead to a higher-quality image at the expense of slower inference.
- **timesteps** (`List[int]`, *optional*) — Custom timesteps for the denoising process, for schedulers whose `set_timesteps` method supports a `timesteps` argument. Must be in descending order. If not defined, the default behavior for the given `num_inference_steps` is used.
- **sigmas** (`List[float]`, *optional*) — Custom sigmas for the denoising process, for schedulers whose `set_timesteps` method supports a `sigmas` argument. If not defined, the default behavior for the given `num_inference_steps` is used.
- **guidance_scale** (`float`, *optional*, defaults to `5.0`) — Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598); it corresponds to `w` in equation 2 of the [Imagen paper](https://huggingface.co/papers/2205.11487). Guidance is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
- **num_images_per_prompt** (`int`, *optional*, defaults to `1`) — The number of images to generate per prompt.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.FloatTensor`, *optional*) — Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling with the supplied random `generator`.
- **prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.*, prompt weighting. If not provided, text embeddings are generated from the `prompt` argument.
- **negative_prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.*, prompt weighting. If not provided, they are generated from the `negative_prompt` argument.
- **original_size** (`Tuple[int]`, *optional*, defaults to `(1024, 1024)`) — If `original_size` is not the same as `target_size`, the image will appear to be down- or upsampled. Defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning, as explained in section 2.2 of [the SDXL paper](https://huggingface.co/papers/2307.01952).
- **crops_coords_top_left** (`Tuple[int]`, *optional*, defaults to `(0, 0)`) — Can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting it to `(0, 0)`. Part of SDXL's micro-conditioning, as explained in section 2.2 of [the SDXL paper](https://huggingface.co/papers/2307.01952).
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `CogView4PipelineOutput` instead of a plain tuple.
- **attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **callback_on_step_end** (`Callable`, *optional*) — A function that is called at the end of each denoising step during inference, with the signature `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` includes all tensors specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*) — The list of tensor inputs for the `callback_on_step_end` function. The tensors in the list are passed as the `callback_kwargs` argument. Only variables listed in the `._callback_tensor_inputs` attribute of your pipeline class can be included.
- **max_sequence_length** (`int`, defaults to `1024`) — Maximum sequence length of the encoded prompt. Can be set to other values but may lead to poorer results.

Returns: `CogView4PipelineOutput` if `return_dict` is `True`, otherwise a `tuple` whose first element is a list with the generated images.
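A short sketch tying together the `generator` and `callback_on_step_end` arguments documented above; the seed and the printed fields are arbitrary illustration choices:

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")


def log_step(pipeline, step, timestep, callback_kwargs):
    # Inspect the intermediate latents exposed via
    # callback_on_step_end_tensor_inputs, then return the kwargs dict.
    print(f"step {step:3d}  timestep {timestep}  latents {tuple(callback_kwargs['latents'].shape)}")
    return callback_kwargs


image = pipe(
    "A photo of an astronaut riding a horse on mars",
    guidance_scale=5.0,
    num_inference_steps=50,
    generator=torch.Generator("cuda").manual_seed(0),  # fixed seed => reproducible output
    callback_on_step_end=log_step,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```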
### encode_prompt

**encode_prompt**(prompt, negative_prompt=None, do_classifier_free_guidance=True, num_images_per_prompt=1, prompt_embeds=None, negative_prompt_embeds=None, device=None, dtype=None, max_sequence_length=1024)

Encodes the prompt into text encoder hidden states.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — The prompt to be encoded.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, `negative_prompt_embeds` must be passed instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **do_classifier_free_guidance** (`bool`, *optional*, defaults to `True`) — Whether or not to use classifier-free guidance.
- **num_images_per_prompt** (`int`, *optional*, defaults to `1`) — Number of images that should be generated per prompt.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.*, prompt weighting. If not provided, text embeddings are generated from the `prompt` argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.*, prompt weighting. If not provided, they are generated from the `negative_prompt` argument.
- **device** (`torch.device`, *optional*) — The torch device to place the resulting embeddings on.
- **dtype** (`torch.dtype`, *optional*) — The torch dtype of the resulting embeddings.
- **max_sequence_length** (`int`, defaults to `1024`) — Maximum sequence length of the encoded prompt. Can be set to other values but may lead to poorer results.
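One common use of `encode_prompt` is to compute embeddings once and reuse them across several calls; a sketch assuming it returns the positive and negative embeddings as a pair, as `encode_prompt` does in most diffusers pipelines:

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Encode once, then reuse across multiple seeds.
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    prompt="A photo of an astronaut riding a horse on mars",
    device=torch.device("cuda"),
)

for seed in range(3):
    image = pipe(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        generator=torch.Generator("cuda").manual_seed(seed),
    ).images[0]
    image.save(f"astronaut_{seed}.png")
```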
## CogView4PipelineOutput

**class diffusers.pipelines.cogview4.pipeline_output.CogView4PipelineOutput**(images: Union[List[PIL.Image.Image], np.ndarray])

Output class for CogView4 pipelines.

Parameters:

- **images** (`List[PIL.Image.Image]` or `np.ndarray`) — List of denoised PIL images of length `batch_size`, or a numpy array of shape `(batch_size, height, width, num_channels)`, representing the denoised images of the diffusion pipeline.
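To make the `return_dict` behavior described above concrete, a minimal sketch:

```python
import torch
from diffusers import CogView4Pipeline

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")

prompt = "A photo of an astronaut riding a horse on mars"

# Default: a CogView4PipelineOutput with an `images` field.
output = pipe(prompt)
print(type(output).__name__, len(output.images))

# return_dict=False: a plain tuple whose first element is the image list.
images = pipe(prompt, return_dict=False)[0]
print(len(images))
```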
