agentbot committed
Commit f32af0f · verified · 1 Parent(s): bf1a230

Initial commit with folder contents

.gitattributes CHANGED
@@ -36,5 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 cached_pipe/text_encoder filter=lfs diff=lfs merge=lfs -text
 cached_pipe/text_encoder_2 filter=lfs diff=lfs merge=lfs -text
 cached_pipe/vae.decoder filter=lfs diff=lfs merge=lfs -text
-cached_pipe/fast_unet filter=lfs diff=lfs merge=lfs -text
 cached_pipe/unet filter=lfs diff=lfs merge=lfs -text
.gitmodules ADDED
@@ -0,0 +1,4 @@
+[submodule "newdream-sdxl-20"]
+	path = models/newdream-sdxl-20
+	url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
+	branch = main
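
The pipeline code later in this commit loads the model from this submodule path, so a missing checkout only fails at load time. A minimal guard, purely illustrative and assuming the usual diffusers repo layout with a model_index.json at the root:

```python
# Illustrative sketch, not part of the commit: fail fast if the submodule
# declared above has not been checked out (load_pipeline() reads this path).
from pathlib import Path

MODEL_DIR = Path("models/newdream-sdxl-20")

# A diffusers-format repo is assumed to ship a model_index.json at its root.
if not (MODEL_DIR / "model_index.json").exists():
    raise RuntimeError("model submodule missing; run `git submodule update --init`")
```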
cache_info.json ADDED
@@ -0,0 +1 @@
+{"repository": "https://huggingface.co/silencer107/poo3", "revision": "1a46d0d"}
cached_pipe/text_encoder CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:386b7cad4378861ad4fb7ecb4dee107bf7fe28c76668bea03a0dc084a210aced
-size 2728173
+oid sha256:5e05bb0bdc1e7bb9af8b5524fce5523d4ceadfc12037fce22cf224e9797f9c31
+size 2728178
cached_pipe/text_encoder_2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f97e11242c00f86337bd9801ad4820b68c99918a922c8542922be505c2bb430
-size 9363341
+oid sha256:59f85126611ab3b28ea283da1b1a77eeae6937d3ed3c5fc9413c8b40418165fd
+size 9363012
cached_pipe/unet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e600471d0d62f0d13d24a2c2e79e49eae4709d02c8218912029e2b5eda7c457f
-size 676786352
+oid sha256:4ca58697e0025abc011c46f214df6e0850206b11287a91e79b4fe631d1a8adaa
+size 687271819
cached_pipe/vae.decoder CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3986989d6743d07e59fb46dff1a13456b6fe41fe5bc9f635e194be7e01e73583
-size 187873926
+oid sha256:af3d1483221ecf9f753b862fed022729c3a793f1d85740d2cb93a9a8a8e995cf
+size 187873891
loss_params.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27ba04dc09bfe8325c2b8d8acbfa5fbf746f61169cf1cdfe07d028ad697217f1
-size 3568
+oid sha256:8c8d58c214ba22a6aeacea98cef1a4b88fb88c8d0ed113f39b6a60b0165b3bdb
+size 3952
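
The five diffs above only touch Git LFS pointer files; the real payloads live in LFS storage, so what actually changed are the `oid` and `size` fields. A minimal parser for the pointer format (spec v1, as referenced in the files themselves):

```python
# Minimal sketch: parse a Git LFS pointer file (spec v1) into a dict, e.g.
# {"version": "...", "oid": "sha256:...", "size": "687271819"}.
def parse_lfs_pointer(path: str) -> dict[str, str]:
    fields: dict[str, str] = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

print(parse_lfs_pointer("cached_pipe/unet"))  # new oid/size after this commit
```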
pyproject.toml CHANGED
@@ -11,7 +11,7 @@ dependencies = [
     "diffusers==0.28.2",
     "onediff==1.2.0",
     "onediffx==1.2.0",
-    "accelerate==0.31.0",
+    "oneflow",
     "numpy==1.26.4",
     "xformers==0.0.25.post1",
     "triton==2.2.0",
@@ -20,18 +20,13 @@ dependencies = [
     "omegaconf==2.3.0",
     "torch==2.2.2",
     "torchvision==0.17.2",
-    "huggingface-hub==0.25.2",
+    "huggingface_hub==0.24.7",
+    "setuptools==75.2.0",
     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines",
-    "oneflow",
-    "setuptools>=75.2.0",
-    "bitsandbytes>=0.44.1",
-    "stable-fast",
-    "tomesd>=0.1.3",
 ]

 [tool.uv.sources]
 oneflow = { url = "https://github.com/siliconflow/oneflow_releases/releases/download/community_cu118/oneflow-0.9.1.dev20240802%2Bcu118-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }
-stable-fast = { url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl" }

 [project.scripts]
-start_inference = "main:main"
+start_inference = "main:main"
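
Net effect of this diff: `oneflow` moves into the main dependency list, `huggingface_hub` is pinned down to 0.24.7, `setuptools` gets an exact pin, and `bitsandbytes`, `stable-fast`, and `tomesd` are dropped. A quick, illustrative environment check against the new pins:

```python
# Illustrative check that an installed environment matches the new pins above.
from importlib.metadata import version

PINS = {
    "huggingface-hub": "0.24.7",  # PyPI distribution name; imported as huggingface_hub
    "setuptools": "75.2.0",
    "diffusers": "0.28.2",
    "torch": "2.2.2",
}

for package, pinned in PINS.items():
    installed = version(package)
    assert installed == pinned, f"{package}: expected {pinned}, got {installed}"
```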
src/__pycache__/loss.cpython-310.pyc ADDED
Binary file (5.17 kB)
 
src/__pycache__/main.cpython-310.pyc ADDED
Binary file (1.6 kB)
 
src/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (1.63 kB)
 
src/edge_maxxing_4090_newdream.egg-info/PKG-INFO ADDED
@@ -0,0 +1,20 @@
+Metadata-Version: 2.1
+Name: edge-maxxing-4090-newdream
+Version: 6
+Summary: An edge-maxxing model submission for the 4090 newdream contest
+Requires-Python: <3.11,>=3.10
+Requires-Dist: diffusers==0.28.2
+Requires-Dist: onediff==1.2.0
+Requires-Dist: onediffx==1.2.0
+Requires-Dist: oneflow
+Requires-Dist: numpy==1.26.4
+Requires-Dist: xformers==0.0.25.post1
+Requires-Dist: triton==2.2.0
+Requires-Dist: transformers==4.41.2
+Requires-Dist: accelerate==0.31.0
+Requires-Dist: omegaconf==2.3.0
+Requires-Dist: torch==2.2.2
+Requires-Dist: torchvision==0.17.2
+Requires-Dist: huggingface_hub==0.24.7
+Requires-Dist: setuptools==75.2.0
+Requires-Dist: edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines
src/edge_maxxing_4090_newdream.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,11 @@
+README.md
+pyproject.toml
+src/loss.py
+src/main.py
+src/pipeline.py
+src/edge_maxxing_4090_newdream.egg-info/PKG-INFO
+src/edge_maxxing_4090_newdream.egg-info/SOURCES.txt
+src/edge_maxxing_4090_newdream.egg-info/dependency_links.txt
+src/edge_maxxing_4090_newdream.egg-info/entry_points.txt
+src/edge_maxxing_4090_newdream.egg-info/requires.txt
+src/edge_maxxing_4090_newdream.egg-info/top_level.txt
src/edge_maxxing_4090_newdream.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
src/edge_maxxing_4090_newdream.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+[console_scripts]
+start_inference = main:main
src/edge_maxxing_4090_newdream.egg-info/requires.txt ADDED
@@ -0,0 +1,15 @@
+diffusers==0.28.2
+onediff==1.2.0
+onediffx==1.2.0
+oneflow
+numpy==1.26.4
+xformers==0.0.25.post1
+triton==2.2.0
+transformers==4.41.2
+accelerate==0.31.0
+omegaconf==2.3.0
+torch==2.2.2
+torchvision==0.17.2
+huggingface_hub==0.24.7
+setuptools==75.2.0
+edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines
src/edge_maxxing_4090_newdream.egg-info/top_level.txt ADDED
@@ -0,0 +1,3 @@
+loss
+main
+pipeline
src/loss.py CHANGED
@@ -42,4 +42,4 @@ class SchedulerWrapper:
 		for E in A:F=torch.cat(C.catch_x[E],dim=0);B.append(F);G=torch.cat(C.catch_e[E],dim=0);D.append(G)
 		H=A[-1];I=torch.cat(C.catch_x_[H],dim=0);B.append(I);A=torch.tensor(A,dtype=torch.int32);B=torch.stack(B);D=torch.stack(D);return A,B,D
 	def load_loss_params(A):B,C,D=torch.load(A.loss_params_path,map_location='cpu');A.loss_model=LossSchedulerModel(C,D);A.loss_scheduler=LossScheduler(B,A.loss_model)
-	def prepare_loss(A,num_accelerate_steps=15):A.load_loss_params()
+	def prepare_loss(A,num_accelerate_steps=16):A.load_loss_params()
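
The file is minified, so the change is easy to miss: only the `num_accelerate_steps` default moves from 15 to 16, and the argument is currently unused in the body. Expanded for readability, the two methods are roughly equivalent to the sketch below; `LossSchedulerModel` and `LossScheduler` are defined earlier in src/loss.py, and the 3-tuple layout of loss_params.pth is inferred from the unpacking:

```python
# Readability sketch of the minified pair above (not part of the diff).
import torch

def load_loss_params(wrapper):
    # loss_params.pth stores a 3-tuple: scheduler state plus two model params.
    state, params_a, params_b = torch.load(wrapper.loss_params_path, map_location="cpu")
    wrapper.loss_model = LossSchedulerModel(params_a, params_b)        # defined in src/loss.py
    wrapper.loss_scheduler = LossScheduler(state, wrapper.loss_model)  # defined in src/loss.py

def prepare_loss(wrapper, num_accelerate_steps=16):  # default bumped from 15
    load_loss_params(wrapper)
```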
src/pipeline.py CHANGED
@@ -1,982 +1,47 @@
 import torch
-from PIL import Image
+from PIL.Image import Image
+from onediffx.deep_cache import StableDiffusionXLPipeline
 from pipelines.models import TextToImageRequest
 from torch import Generator
-import json
-from diffusers import StableDiffusionXLPipeline, DDIMScheduler
-import inspect
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+import oneflow as flow
+from onediff.infer_compiler import oneflow_compile
+from onediffx import compile_pipe, save_pipe, load_pipe
+from diffusers import DDIMScheduler
 from loss import SchedulerWrapper
-from onediffx import compile_pipe,load_pipe
-# Import necessary components
-from transformers import (
-    CLIPImageProcessor,
-    CLIPTextModel,
-    CLIPTextModelWithProjection,
-    CLIPTokenizer,
-    CLIPVisionModelWithProjection,
-)
-
-
-from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
-from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
-from diffusers.loaders import (
-    FromSingleFileMixin,
-    IPAdapterMixin,
-    StableDiffusionXLLoraLoaderMixin,
-    TextualInversionLoaderMixin,
-)
-from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0,
-    FusedAttnProcessor2_0,
-    XFormersAttnProcessor,
-)
-from diffusers.models.lora import adjust_lora_scale_text_encoder
-from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils import (
-    USE_PEFT_BACKEND,
-    deprecate,
-    is_invisible_watermark_available,
-    is_torch_xla_available,
-    logging,
-    replace_example_docstring,
-    scale_lora_layers,
-    unscale_lora_layers,
-)
-from diffusers.utils.torch_utils import randn_tensor
-from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
-from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
-
-# Import watermark if available
-if is_invisible_watermark_available():
-    from .watermark import StableDiffusionXLWatermarker
-
-# Check for XLA availability
-if is_torch_xla_available():
-    import torch_xla.core.xla_model as xm
-    XLA_AVAILABLE = True
-else:
-    XLA_AVAILABLE = False
-
-logger = logging.get_logger(__name__)
-
-# Constants
-EXAMPLE_DOC_STRING = """
-    Examples:
-        ```py
-        >>> import torch
-        >>> from diffusers import StableDiffusionXLPipeline
-
-        >>> pipe = StableDiffusionXLPipeline.from_pretrained(
-        >>>     "stabilityai/stable-diffusion-xl-base-1.0",
-        >>>     torch_dtype=torch.float16
-        >>> )
-        >>> pipe = pipe.to("cuda")
-
-        >>> prompt = "a photo of an astronaut riding a horse on mars"
-        >>> image = pipe(prompt).images[0]
-        ```
-"""
-
-# Helper functions
-def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
-    """Rescale noise configuration."""
-    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
-    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
-    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
-    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
-    return noise_cfg
-
-# Utils functions
-import numpy as np
-def max_pixel_filter(image: Image) -> Image:
-    try:
-        # Convert the image to a numpy array
-        img_array = np.array(image)
-        # Find the maximum pixel value in the image
-        max_val = img_array.max()
-        # Reduce the maximum value to 1
-        img_array[img_array == max_val] -= 2
-        # Convert the numpy array back to an image
-        filtered_image = Image.fromarray(img_array)
-        return filtered_image
-    except:
-        return image
-
-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
-def retrieve_timesteps(
-    scheduler,
-    num_inference_steps: Optional[int] = None,
-    device: Optional[Union[str, torch.device]] = None,
-    timesteps: Optional[List[int]] = None,
-    sigmas: Optional[List[float]] = None,
-    **kwargs,
-):
-    if timesteps is not None and sigmas is not None:
-        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
-    if timesteps is not None:
-        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
-        if not accepts_timesteps:
-            raise ValueError(
-                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
-                f" timestep schedules. Please check whether you are using the correct scheduler."
-            )
-        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
-        timesteps = scheduler.timesteps
-        num_inference_steps = len(timesteps)
-    elif sigmas is not None:
-        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
-        if not accept_sigmas:
-            raise ValueError(
-                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
-                f" sigmas schedules. Please check whether you are using the correct scheduler."
-            )
-        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
-        timesteps = scheduler.timesteps
-        num_inference_steps = len(timesteps)
-    else:
-        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
-        timesteps = scheduler.timesteps
-    return timesteps, num_inference_steps
-
-
-class StableDiffusionXLPipeline_new(
-    DiffusionPipeline,
-    StableDiffusionMixin,
-    FromSingleFileMixin,
-    StableDiffusionXLLoraLoaderMixin,
-    TextualInversionLoaderMixin,
-    IPAdapterMixin,
-):
-
-    model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
-    _optional_components = [
-        "tokenizer",
-        "tokenizer_2",
-        "text_encoder",
-        "text_encoder_2",
-        "image_encoder",
-        "feature_extractor",
-    ]
-    _callback_tensor_inputs = [
-        "latents",
-        "prompt_embeds",
-        "negative_prompt_embeds",
-        "add_text_embeds",
-        "add_time_ids",
-        "negative_pooled_prompt_embeds",
-        "negative_add_time_ids",
-    ]
-
-    def __init__(
-        self,
-        vae: AutoencoderKL,
-        text_encoder: CLIPTextModel,
-        text_encoder_2: CLIPTextModelWithProjection,
-        tokenizer: CLIPTokenizer,
-        tokenizer_2: CLIPTokenizer,
-        unet: UNet2DConditionModel,
-        scheduler: KarrasDiffusionSchedulers,
-        image_encoder: CLIPVisionModelWithProjection = None,
-        feature_extractor: CLIPImageProcessor = None,
-        force_zeros_for_empty_prompt: bool = True,
-        add_watermarker: Optional[bool] = None,
-    ):
-        super().__init__()
-
-        self.register_modules(
-            vae=vae,
-            text_encoder=text_encoder,
-            text_encoder_2=text_encoder_2,
-            tokenizer=tokenizer,
-            tokenizer_2=tokenizer_2,
-            unet=unet,
-            scheduler=scheduler,
-            image_encoder=image_encoder,
-            feature_extractor=feature_extractor,
-        )
-        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
-        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
-
-        self.default_sample_size = self.unet.config.sample_size
-
-        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
-
-        if add_watermarker:
-            self.watermark = StableDiffusionXLWatermarker()
-        else:
-            self.watermark = None
-
-    def encode_prompt(
-        self,
-        prompt: str,
-        prompt_2: Optional[str] = None,
-        device: Optional[torch.device] = None,
-        num_images_per_prompt: int = 1,
-        do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
-        prompt_embeds: Optional[torch.Tensor] = None,
-        negative_prompt_embeds: Optional[torch.Tensor] = None,
-        pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        lora_scale: Optional[float] = None,
-        clip_skip: Optional[int] = None,
-    ):
-        device = device or self._execution_device
-
-        # set lora scale so that monkey patched LoRA
-        # function of text encoder can correctly access it
-        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
-            self._lora_scale = lora_scale
-
-            # dynamically adjust the LoRA scale
-            if self.text_encoder is not None:
-                if not USE_PEFT_BACKEND:
-                    adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
-                else:
-                    scale_lora_layers(self.text_encoder, lora_scale)
-
-            if self.text_encoder_2 is not None:
-                if not USE_PEFT_BACKEND:
-                    adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
-                else:
-                    scale_lora_layers(self.text_encoder_2, lora_scale)
-
-        prompt = [prompt] if isinstance(prompt, str) else prompt
-
-        if prompt is not None:
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        # Define tokenizers and text encoders
-        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
-        text_encoders = (
-            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
-        )
-
-        if prompt_embeds is None:
-            prompt_2 = prompt_2 or prompt
-            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
-
-            # textual inversion: process multi-vector tokens if necessary
-            prompt_embeds_list = []
-            prompts = [prompt, prompt_2]
-            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
-                if isinstance(self, TextualInversionLoaderMixin):
-                    prompt = self.maybe_convert_prompt(prompt, tokenizer)
-
-                text_inputs = tokenizer(
-                    prompt,
-                    padding="max_length",
-                    max_length=tokenizer.model_max_length,
-                    truncation=True,
-                    return_tensors="pt",
-                )
-
-                text_input_ids = text_inputs.input_ids
-                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
-                    text_input_ids, untruncated_ids
-                ):
-                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
-                    logger.warning(
-                        "The following part of your input was truncated because CLIP can only handle sequences up to"
-                        f" {tokenizer.model_max_length} tokens: {removed_text}"
-                    )
-
-                prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
-
-                # We are only ALWAYS interested in the pooled output of the final text encoder
-                pooled_prompt_embeds = prompt_embeds[0]
-                if clip_skip is None:
-                    prompt_embeds = prompt_embeds.hidden_states[-2]
-                else:
-                    # "2" because SDXL always indexes from the penultimate layer.
-                    prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
-
-                prompt_embeds_list.append(prompt_embeds)
-
-            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
-
-        # get unconditional embeddings for classifier free guidance
-        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
-        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
-            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
-            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
-        elif do_classifier_free_guidance and negative_prompt_embeds is None:
-            negative_prompt = negative_prompt or ""
-            negative_prompt_2 = negative_prompt_2 or negative_prompt
-
-            # normalize str to list
-            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
-            negative_prompt_2 = (
-                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
-            )
-
-            uncond_tokens: List[str]
-            if prompt is not None and type(prompt) is not type(negative_prompt):
-                raise TypeError(
-                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                    f" {type(prompt)}."
-                )
-            elif batch_size != len(negative_prompt):
-                raise ValueError(
-                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                    " the batch size of `prompt`."
-                )
-            else:
-                uncond_tokens = [negative_prompt, negative_prompt_2]
-
-            negative_prompt_embeds_list = []
-            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
-                if isinstance(self, TextualInversionLoaderMixin):
-                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
-
-                max_length = prompt_embeds.shape[1]
-                uncond_input = tokenizer(
-                    negative_prompt,
-                    padding="max_length",
-                    max_length=max_length,
-                    truncation=True,
-                    return_tensors="pt",
-                )
-
-                negative_prompt_embeds = text_encoder(
-                    uncond_input.input_ids.to(device),
-                    output_hidden_states=True,
-                )
-                # We are only ALWAYS interested in the pooled output of the final text encoder
-                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
-                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
-
-                negative_prompt_embeds_list.append(negative_prompt_embeds)
-
-            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
-
-        if self.text_encoder_2 is not None:
-            prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
-        else:
-            prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
-
-        bs_embed, seq_len, _ = prompt_embeds.shape
-        # duplicate text embeddings for each generation per prompt, using mps friendly method
-        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
-        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-        if do_classifier_free_guidance:
-            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-            seq_len = negative_prompt_embeds.shape[1]
-
-            if self.text_encoder_2 is not None:
-                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
-            else:
-                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
-
-            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
-            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
-            bs_embed * num_images_per_prompt, -1
-        )
-        if do_classifier_free_guidance:
-            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
-                bs_embed * num_images_per_prompt, -1
-            )
-
-        if self.text_encoder is not None:
-            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
-                # Retrieve the original scale by scaling back the LoRA layers
-                unscale_lora_layers(self.text_encoder, lora_scale)
-
-        if self.text_encoder_2 is not None:
-            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
-                # Retrieve the original scale by scaling back the LoRA layers
-                unscale_lora_layers(self.text_encoder_2, lora_scale)
-
-        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
-    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
-        dtype = next(self.image_encoder.parameters()).dtype
-
-        if not isinstance(image, torch.Tensor):
-            image = self.feature_extractor(image, return_tensors="pt").pixel_values
-
-        image = image.to(device=device, dtype=dtype)
-        if output_hidden_states:
-            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
-            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
-            uncond_image_enc_hidden_states = self.image_encoder(
-                torch.zeros_like(image), output_hidden_states=True
-            ).hidden_states[-2]
-            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
-                num_images_per_prompt, dim=0
-            )
-            return image_enc_hidden_states, uncond_image_enc_hidden_states
-        else:
-            image_embeds = self.image_encoder(image).image_embeds
-            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
-            uncond_image_embeds = torch.zeros_like(image_embeds)
-
-            return image_embeds, uncond_image_embeds
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
-    def prepare_ip_adapter_image_embeds(
-        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
-    ):
-        image_embeds = []
-        if do_classifier_free_guidance:
-            negative_image_embeds = []
-        if ip_adapter_image_embeds is None:
-            if not isinstance(ip_adapter_image, list):
-                ip_adapter_image = [ip_adapter_image]
-
-            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
-                raise ValueError(
-                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
-                )
-
-            for single_ip_adapter_image, image_proj_layer in zip(
-                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
-            ):
-                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
-                single_image_embeds, single_negative_image_embeds = self.encode_image(
-                    single_ip_adapter_image, device, 1, output_hidden_state
-                )
-
-                image_embeds.append(single_image_embeds[None, :])
-                if do_classifier_free_guidance:
-                    negative_image_embeds.append(single_negative_image_embeds[None, :])
-        else:
-            for single_image_embeds in ip_adapter_image_embeds:
-                if do_classifier_free_guidance:
-                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
-                    negative_image_embeds.append(single_negative_image_embeds)
-                image_embeds.append(single_image_embeds)
-
-        ip_adapter_image_embeds = []
-        for i, single_image_embeds in enumerate(image_embeds):
-            single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
-            if do_classifier_free_guidance:
-                single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
-                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
-
-            single_image_embeds = single_image_embeds.to(device=device)
-            ip_adapter_image_embeds.append(single_image_embeds)
-
-        return ip_adapter_image_embeds
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
-    def prepare_extra_step_kwargs(self, generator, eta):
-        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-        # and should be between [0, 1]
-
-        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        extra_step_kwargs = {}
-        if accepts_eta:
-            extra_step_kwargs["eta"] = eta
-
-        # check if the scheduler accepts generator
-        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        if accepts_generator:
-            extra_step_kwargs["generator"] = generator
-        return extra_step_kwargs
-
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
-    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-        shape = (
-            batch_size,
-            num_channels_latents,
-            int(height) // self.vae_scale_factor,
-            int(width) // self.vae_scale_factor,
-        )
-        if isinstance(generator, list) and len(generator) != batch_size:
-            raise ValueError(
-                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-            )
-
-        if latents is None:
-            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-        else:
-            latents = latents.to(device)
-
-        # scale the initial noise by the standard deviation required by the scheduler
-        latents = latents * self.scheduler.init_noise_sigma
-        return latents
-
-    def _get_add_time_ids(
-        self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
-    ):
-        add_time_ids = list(original_size + crops_coords_top_left + target_size)
-
-        passed_add_embed_dim = (
-            self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
-        )
-        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
-
-        if expected_add_embed_dim != passed_add_embed_dim:
-            raise ValueError(
-                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
-            )
-
-        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
-        return add_time_ids
-
-    def upcast_vae(self):
-        dtype = self.vae.dtype
-        self.vae.to(dtype=torch.float32)
-        use_torch_2_0_or_xformers = isinstance(
-            self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
-        )
-        # if xformers or torch_2_0 is used attention block does not need
-        # to be in float32 which can save lots of memory
-        if use_torch_2_0_or_xformers:
-            self.vae.post_quant_conv.to(dtype)
-            self.vae.decoder.conv_in.to(dtype)
-            self.vae.decoder.mid_block.to(dtype)
-
-    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(
-        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
-    ) -> torch.Tensor:
-        """
-        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
-
-        Args:
-            w (`torch.Tensor`):
-                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
-            embedding_dim (`int`, *optional*, defaults to 512):
-                Dimension of the embeddings to generate.
-            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
-                Data type of the generated embeddings.
-
-        Returns:
-            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
-        """
-        assert len(w.shape) == 1
-        w = w * 1000.0
-
-        half_dim = embedding_dim // 2
-        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
-        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
-        emb = w.to(dtype)[:, None] * emb[None, :]
-        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
-        if embedding_dim % 2 == 1:  # zero pad
-            emb = torch.nn.functional.pad(emb, (0, 1))
-        assert emb.shape == (w.shape[0], embedding_dim)
-        return emb
-
-    @property
-    def guidance_scale(self):
-        return self._guidance_scale
-
-    @property
-    def guidance_rescale(self):
-        return self._guidance_rescale
-
-    @property
-    def clip_skip(self):
-        return self._clip_skip
-
-    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-    # corresponds to doing no classifier free guidance.
-    @property
-    def do_classifier_free_guidance(self):
-        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
-
-    @property
-    def cross_attention_kwargs(self):
-        return self._cross_attention_kwargs
-
-    @property
-    def denoising_end(self):
-        return self._denoising_end
-
-    @property
-    def num_timesteps(self):
-        return self._num_timesteps
-
-    @property
-    def interrupt(self):
-        return self._interrupt
-
-    @torch.no_grad()
-    def __call__(
-        self,
-        prompt: Union[str, List[str]] = None,
-        prompt_2: Optional[Union[str, List[str]]] = None,
-        height: Optional[int] = None,
-        width: Optional[int] = None,
-        num_inference_steps: int = 50,
-        timesteps: List[int] = None,
-        sigmas: List[float] = None,
-        denoising_end: Optional[float] = None,
-        guidance_scale: float = 5.0,
-        negative_prompt: Optional[Union[str, List[str]]] = None,
-        negative_prompt_2: Optional[Union[str, List[str]]] = None,
-        num_images_per_prompt: Optional[int] = 1,
-        eta: float = 0.0,
-        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        latents: Optional[torch.Tensor] = None,
-        prompt_embeds: Optional[torch.Tensor] = None,
-        negative_prompt_embeds: Optional[torch.Tensor] = None,
-        pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        ip_adapter_image: Optional[PipelineImageInput] = None,
-        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
-        return_dict: bool = True,
-        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-        guidance_rescale: float = 0.0,
-        end_cfg: float = 0.9,
-        original_size: Optional[Tuple[int, int]] = None,
-        crops_coords_top_left: Tuple[int, int] = (0, 0),
-        target_size: Optional[Tuple[int, int]] = None,
-        negative_original_size: Optional[Tuple[int, int]] = None,
-        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
-        negative_target_size: Optional[Tuple[int, int]] = None,
-        clip_skip: Optional[int] = None,
-        callback_on_step_end: Optional[
-            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
-        ] = None,
-        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
-        **kwargs,
-    ):
-        callback = kwargs.pop("callback", None)
-        callback_steps = kwargs.pop("callback_steps", None)
-
-        if callback is not None:
-            deprecate(
-                "callback",
-                "1.0.0",
-                "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
-            )
-        if callback_steps is not None:
-            deprecate(
-                "callback_steps",
-                "1.0.0",
-                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
-            )
-
-        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
-            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
-
-        # 0. Default height and width to unet
-        height = height or self.default_sample_size * self.vae_scale_factor
-        width = width or self.default_sample_size * self.vae_scale_factor
-
-        original_size = original_size or (height, width)
-        target_size = target_size or (height, width)
-
-        self._guidance_scale = guidance_scale
-        self._guidance_rescale = guidance_rescale
-        self._clip_skip = clip_skip
-        self._cross_attention_kwargs = cross_attention_kwargs
-        self._denoising_end = denoising_end
-        self._interrupt = False
-
-        # 2. Define call parameters
-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        device = self._execution_device
-
-        # 3. Encode input prompt
-        lora_scale = (
-            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
-        )
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = self.encode_prompt(
-            prompt=prompt,
-            prompt_2=prompt_2,
-            device=device,
-            num_images_per_prompt=num_images_per_prompt,
-            do_classifier_free_guidance=self.do_classifier_free_guidance,
-            negative_prompt=negative_prompt,
-            negative_prompt_2=negative_prompt_2,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-            lora_scale=lora_scale,
-            clip_skip=self.clip_skip,
-        )
-
-        # 4. Prepare timesteps
-        timesteps, num_inference_steps = retrieve_timesteps(
-            self.scheduler, num_inference_steps, device, timesteps, sigmas
-        )
-
-        # 5. Prepare latent variables
-        num_channels_latents = self.unet.config.in_channels
-        latents = self.prepare_latents(
-            batch_size * num_images_per_prompt,
-            num_channels_latents,
-            height,
-            width,
-            prompt_embeds.dtype,
-            device,
-            generator,
-            latents,
-        )
-
-        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
-        # 7. Prepare added time ids & embeddings
-        add_text_embeds = pooled_prompt_embeds
-        if self.text_encoder_2 is None:
-            text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
-        else:
-            text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
-
-        add_time_ids = self._get_add_time_ids(
-            original_size,
-            crops_coords_top_left,
-            target_size,
-            dtype=prompt_embeds.dtype,
-            text_encoder_projection_dim=text_encoder_projection_dim,
-        )
-        if negative_original_size is not None and negative_target_size is not None:
-            negative_add_time_ids = self._get_add_time_ids(
-                negative_original_size,
-                negative_crops_coords_top_left,
-                negative_target_size,
-                dtype=prompt_embeds.dtype,
-                text_encoder_projection_dim=text_encoder_projection_dim,
-            )
-        else:
-            negative_add_time_ids = add_time_ids
-
-        if self.do_classifier_free_guidance:
-            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
-            add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
-            add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
-
-        prompt_embeds = prompt_embeds.to(device)
-        add_text_embeds = add_text_embeds.to(device)
-        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
-
-        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
-            image_embeds = self.prepare_ip_adapter_image_embeds(
-                ip_adapter_image,
-                ip_adapter_image_embeds,
-                device,
-                batch_size * num_images_per_prompt,
-                self.do_classifier_free_guidance,
-            )
-
-        # 8. Denoising loop
-        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
-
-        # 8.1 Apply denoising_end
-        if (
-            self.denoising_end is not None
-            and isinstance(self.denoising_end, float)
-            and self.denoising_end > 0
-            and self.denoising_end < 1
-        ):
-            discrete_timestep_cutoff = int(
-                round(
-                    self.scheduler.config.num_train_timesteps
-                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
-                )
-            )
-            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
-            timesteps = timesteps[:num_inference_steps]
-
-        # 9. Optionally get Guidance Scale Embedding
-        timestep_cond = None
-        if self.unet.config.time_cond_proj_dim is not None:
-            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
-            timestep_cond = self.get_guidance_scale_embedding(
-                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
-            ).to(device=device, dtype=latents.dtype)
-
-        self._num_timesteps = len(timesteps)
-        with self.progress_bar(total=num_inference_steps) as progress_bar:
-            do_classifier_free_guidance = self.do_classifier_free_guidance
-            for i, t in enumerate(timesteps):
-                if self.interrupt:
-                    continue
-                if end_cfg is not None and i / num_inference_steps > end_cfg and do_classifier_free_guidance:
-                    do_classifier_free_guidance = False
-                    prompt_embeds = 1.5*torch.chunk(prompt_embeds, 2, dim=0)[-1]
-                    add_text_embeds = 1.5*torch.chunk(add_text_embeds, 2, dim=0)[-1]
-                    add_time_ids = 1.25*torch.chunk(add_time_ids, 2, dim=0)[-1]
-                # expand the latents if we are doing classifier free guidance
-                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-
-                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-                # predict the noise residual
-                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
-                if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
-                    added_cond_kwargs["image_embeds"] = image_embeds
-                noise_pred = self.unet(
-                    latent_model_input,
-                    t,
-                    encoder_hidden_states=prompt_embeds,
-                    timestep_cond=timestep_cond,
-                    cross_attention_kwargs=self.cross_attention_kwargs,
-                    added_cond_kwargs=added_cond_kwargs,
-                    return_dict=False,
-                )[0]
-
-                # perform guidance
-                if do_classifier_free_guidance:
-                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                if do_classifier_free_guidance and self.guidance_rescale > 0.0:
-                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
-                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
-
-                # compute the previous noisy sample x_t -> x_t-1
-                latents_dtype = latents.dtype
-                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
-                if latents.dtype != latents_dtype:
-                    if torch.backends.mps.is_available():
-                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
-                        latents = latents.to(latents_dtype)
-
-                if callback_on_step_end is not None:
-                    callback_kwargs = {}
-                    for k in callback_on_step_end_tensor_inputs:
-                        callback_kwargs[k] = locals()[k]
-                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
-
-                    latents = callback_outputs.pop("latents", latents)
-                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
-                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
-                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
-                    negative_pooled_prompt_embeds = callback_outputs.pop(
-                        "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
-                    )
-                    add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
-                    negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
-
-                # call the callback, if provided
-                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
-                    progress_bar.update()
-                    if callback is not None and i % callback_steps == 0:
-                        step_idx = i // getattr(self.scheduler, "order", 1)
-                        callback(step_idx, t, latents)
-
-                if XLA_AVAILABLE:
-                    xm.mark_step()
-
-        if not output_type == "latent":
-            # make sure the VAE is in float32 mode, as it overflows in float16
-            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
-
-            if needs_upcasting:
-                self.upcast_vae()
-                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
-            elif latents.dtype != self.vae.dtype:
-                if torch.backends.mps.is_available():
-                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
-                    self.vae = self.vae.to(latents.dtype)
-
-            # unscale/denormalize the latents
-            # denormalize with the mean and std if available and not None
-            has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
-            has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
-            if has_latents_mean and has_latents_std:
-                latents_mean = (
-                    torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
-                )
-                latents_std = (
-                    torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
-                )
-                latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
-            else:
-                latents = latents / self.vae.config.scaling_factor
-
-            image = self.vae.decode(latents, return_dict=False)[0]
-
-            # cast back to fp16 if needed
-            if needs_upcasting:
-                self.vae.to(dtype=torch.float16)
-        else:
-            image = latents
-
-        if not output_type == "latent":
-            # apply watermark if available
-            if self.watermark is not None:
-                image = self.watermark.apply_watermark(image)
-
-            image = self.image_processor.postprocess(image, output_type=output_type)
-
-        # Offload all models
-        self.maybe_free_model_hooks()
-
-        if not return_dict:
-            return (image,)
-
-        return StableDiffusionXLPipelineOutput(images=image)
 
 def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
-    """Load and prepare the pipeline."""
     if not pipeline:
-        pipeline = StableDiffusionXLPipeline_new.from_pretrained(
+        pipeline = StableDiffusionXLPipeline.from_pretrained(
             "./models/newdream-sdxl-20",
             torch_dtype=torch.float16,
            local_files_only=True,
-        ).to("cuda")
-
+        )
+        pipeline.to("cuda")
     pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
     pipeline = compile_pipe(pipeline)
-    load_pipe(pipeline, dir="cached_pipe")
+    pipeline.unet = oneflow_compile(pipeline.unet)
 
-    # Warm-up runs
+    load_pipe(pipeline,dir="cached_pipe")
     for _ in range(4):
-        pipeline(
-            prompt="a cat and a dog",
-            num_inference_steps=18
-        )
+        deepcache_output = pipeline(prompt="make submissions great again", cache_interval=1, cache_layer_id=0, cache_block_id=0, num_inference_steps=20)
     pipeline.scheduler.prepare_loss()
     return pipeline
 
-
 def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
-    """Generate image from text prompt."""
-    generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed else None
-
-    image_0 = pipeline(
+    if request.seed is None:
+        generator = None
+    else:
+        generator = Generator(pipeline.device).manual_seed(request.seed)
+
+    return pipeline(
         prompt=request.prompt,
         negative_prompt=request.negative_prompt,
         width=request.width,
        height=request.height,
         generator=generator,
-        num_inference_steps=18,
+        num_inference_steps=15,
+        cache_interval=1,
+        cache_layer_id=0,
+        cache_block_id=0,
     ).images[0]
 
-    filter_image = max_pixel_filter(image_0)
-    return filter_image
-
-
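Taken together, the new pipeline.py swaps the ~980-line custom SDXL pipeline for onediffx's DeepCache variant: the UNet is additionally compiled with `oneflow_compile`, the compiled graph is restored from `cached_pipe/`, and inference drops to 15 steps with `cache_interval=1` (which refreshes the cached features every step). A hypothetical smoke test for the two entry points; the keyword construction of `TextToImageRequest` is an assumption, but the field names match the attributes `infer` reads:

```python
# Hypothetical smoke test (not in the commit) for load_pipeline()/infer().
from pipelines.models import TextToImageRequest  # edge-maxxing pipelines pkg

from pipeline import load_pipeline, infer  # assumes src/ is on sys.path

pipe = load_pipeline()  # compiles UNet, restores cached graph, warms up 4x
request = TextToImageRequest(  # field names as read by infer(); kwargs assumed
    prompt="a watercolor fox in a snowy forest",
    negative_prompt=None,
    width=1024,
    height=1024,
    seed=42,
)
infer(request, pipe).save("sample.png")
```
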
uv.lock CHANGED
@@ -34,19 +34,6 @@ version = "4.9.3"
 source = { registry = "https://pypi.org/simple" }
 sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034 }

-[[package]]
-name = "bitsandbytes"
-version = "0.44.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "numpy" },
-    { name = "torch" },
-]
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/e4/e6/ccb84da7ffaf208a71c2c3c8e1120b34759df640db959660be9a98505eb4/bitsandbytes-0.44.1-py3-none-manylinux_2_24_x86_64.whl", hash = "sha256:b2f24c6cbf11fc8c5d69b3dcecee9f7011451ec59d6ac833e873c9f105259668", size = 122419627 },
-    { url = "https://files.pythonhosted.org/packages/5f/f5/11bddebb5addc0a005b0c1cecc6e4c6e4055ad7b860bdcbf6374e12a51f5/bitsandbytes-0.44.1-py3-none-win_amd64.whl", hash = "sha256:8e68e12aa25d2cf9a1730ad72890a5d1a19daa23f459a6a4679331f353d58cb4", size = 121451331 },
-]
-
 [[package]]
 name = "certifi"
 version = "2024.8.30"
@@ -114,7 +101,6 @@ version = "6"
 source = { editable = "." }
 dependencies = [
     { name = "accelerate" },
-    { name = "bitsandbytes" },
     { name = "diffusers" },
     { name = "edge-maxxing-pipelines" },
     { name = "huggingface-hub" },
@@ -124,8 +110,6 @@ dependencies = [
     { name = "onediffx" },
     { name = "oneflow" },
     { name = "setuptools" },
-    { name = "stable-fast" },
-    { name = "tomesd" },
     { name = "torch" },
     { name = "torchvision" },
     { name = "transformers" },
@@ -136,18 +120,15 @@ dependencies = [
 [package.metadata]
 requires-dist = [
     { name = "accelerate", specifier = "==0.31.0" },
-    { name = "bitsandbytes", specifier = ">=0.44.1" },
     { name = "diffusers", specifier = "==0.28.2" },
     { name = "edge-maxxing-pipelines", git = "https://github.com/womboai/edge-maxxing?subdirectory=pipelines" },
-    { name = "huggingface-hub", specifier = "==0.25.2" },
+    { name = "huggingface-hub", specifier = "==0.24.7" },
     { name = "numpy", specifier = "==1.26.4" },
     { name = "omegaconf", specifier = "==2.3.0" },
     { name = "onediff", specifier = "==1.2.0" },
     { name = "onediffx", specifier = "==1.2.0" },
     { name = "oneflow", url = "https://github.com/siliconflow/oneflow_releases/releases/download/community_cu118/oneflow-0.9.1.dev20240802%2Bcu118-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" },
-    { name = "setuptools", specifier = ">=75.2.0" },
-    { name = "stable-fast", url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl" },
-    { name = "tomesd", specifier = ">=0.1.3" },
+    { name = "setuptools", specifier = "==75.2.0" },
     { name = "torch", specifier = "==2.2.2" },
     { name = "torchvision", specifier = "==0.17.2" },
     { name = "transformers", specifier = "==4.41.2" },
@@ -183,7 +164,7 @@ wheels = [

 [[package]]
 name = "huggingface-hub"
-version = "0.25.2"
+version = "0.24.7"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "filelock" },
@@ -194,9 +175,9 @@ dependencies = [
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/df/fd/5f81bae67096c5ab50d29a0230b8374f0245916cca192f8ee2fada51f4f6/huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c", size = 365806 }
+sdist = { url = "https://files.pythonhosted.org/packages/af/33/d252098a3b8d910065ad09cf318efb5dbe6c8bb586269bdfb47b7e021020/huggingface_hub-0.24.7.tar.gz", hash = "sha256:0ad8fb756e2831da0ac0491175b960f341fe06ebcf80ed6f8728313f95fc0207", size = 349211 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/64/09/a535946bf2dc88e61341f39dc507530411bb3ea4eac493e5ec833e8f35bd/huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25", size = 436575 },
+    { url = "https://files.pythonhosted.org/packages/57/28/a0b0dd3cca63908045edc300360d6cd8758d4d86eee3fd2b08f00c5a41c4/huggingface_hub-0.24.7-py3-none-any.whl", hash = "sha256:a212c555324c8a7b1ffdd07266bb7e7d69ca71aa238d27b7842d65e9a26ac3e5", size = 417514 },
 ]

 [[package]]
@@ -777,31 +758,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/31/2d/90165d51ecd38f9a02c6832198c13a4e48652485e2ccf863ebb942c531b6/setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8", size = 1249825 },
 ]

-[[package]]
-name = "stable-fast"
-version = "1.0.5+torch222cu121"
-source = { url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl" }
-dependencies = [
-    { name = "torch" },
-]
-wheels = [
-    { url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:03d193666c52016096ecfdf0e8e2183fb5aa8ea51e99d5132b353a1e9a6c1264" },
-]
-
-[package.metadata]
-requires-dist = [
-    { name = "diffusers", marker = "extra == 'diffusers'", specifier = ">=0.19.0" },
-    { name = "numpy", marker = "extra == 'dev'" },
-    { name = "opencv-python", marker = "extra == 'dev'" },
-    { name = "pillow", marker = "extra == 'dev'" },
-    { name = "prettytable", marker = "extra == 'dev'" },
-    { name = "pytest", marker = "extra == 'dev'" },
-    { name = "torch" },
-    { name = "transformers", marker = "extra == 'diffusers'" },
-    { name = "triton", marker = "extra == 'triton'", specifier = ">=2.1.0" },
-    { name = "xformers", marker = "extra == 'xformers'", specifier = ">=0.0.20" },
-]
-
 [[package]]
 name = "sympy"
 version = "1.13.3"
@@ -844,18 +800,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/45/b6/36c1bb106bbe96012c9367df89ed01599cada036c0b96d38fbbdbeb75c9f/tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75", size = 9945103 },
 ]

-[[package]]
-name = "tomesd"
-version = "0.1.3"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "torch" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/29/37/ed74c7449fe5a8a4726be3dab4d879d000babf6ea538658171933b1f000e/tomesd-0.1.3.tar.gz", hash = "sha256:15bba2e952f4643c8355951e892fda918ddccbdff2238dc368d42bd078fcedc9", size = 14032 }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/0c/02/367c67c8f510313f143a7818e92254a5f861c7d94c98ad6a08d25db52fee/tomesd-0.1.3-py3-none-any.whl", hash = "sha256:3d5aa0857fe2c2aab253891050601ca13a87d8d7a99b6760b9ca0856aa0c6355", size = 11467 },
-]
-
 [[package]]
 name = "torch"
 version = "2.2.2"
@@ -988,4 +932,4 @@ source = { registry = "https://pypi.org/simple" }
 sdist = { url = "https://files.pythonhosted.org/packages/54/bf/5c0000c44ebc80123ecbdddba1f5dcd94a5ada602a9c225d84b5aaa55e86/zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29", size = 24199 }
 wheels = [
     { url = "https://files.pythonhosted.org/packages/62/8b/5ba542fa83c90e09eac972fc9baca7a88e7e7ca4b221a89251954019308b/zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350", size = 9200 },
-]
+]