agentbot committed
Commit 436e6fc · verified · 1 Parent(s): f32af0f

Initial commit with folder contents

.gitattributes CHANGED
@@ -36,4 +36,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 cached_pipe/text_encoder filter=lfs diff=lfs merge=lfs -text
 cached_pipe/text_encoder_2 filter=lfs diff=lfs merge=lfs -text
 cached_pipe/vae.decoder filter=lfs diff=lfs merge=lfs -text
+cached_pipe/fast_unet filter=lfs diff=lfs merge=lfs -text
 cached_pipe/unet filter=lfs diff=lfs merge=lfs -text
cached_pipe/text_encoder CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5e05bb0bdc1e7bb9af8b5524fce5523d4ceadfc12037fce22cf224e9797f9c31
-size 2728178
+oid sha256:386b7cad4378861ad4fb7ecb4dee107bf7fe28c76668bea03a0dc084a210aced
+size 2728173
cached_pipe/text_encoder_2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59f85126611ab3b28ea283da1b1a77eeae6937d3ed3c5fc9413c8b40418165fd
-size 9363012
+oid sha256:6f97e11242c00f86337bd9801ad4820b68c99918a922c8542922be505c2bb430
+size 9363341
cached_pipe/unet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ca58697e0025abc011c46f214df6e0850206b11287a91e79b4fe631d1a8adaa
-size 687271819
+oid sha256:e600471d0d62f0d13d24a2c2e79e49eae4709d02c8218912029e2b5eda7c457f
+size 676786352
cached_pipe/vae.decoder CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af3d1483221ecf9f753b862fed022729c3a793f1d85740d2cb93a9a8a8e995cf
-size 187873891
+oid sha256:3986989d6743d07e59fb46dff1a13456b6fe41fe5bc9f635e194be7e01e73583
+size 187873926
loss_params.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c8d58c214ba22a6aeacea98cef1a4b88fb88c8d0ed113f39b6a60b0165b3bdb
-size 3952
+oid sha256:27ba04dc09bfe8325c2b8d8acbfa5fbf746f61169cf1cdfe07d028ad697217f1
+size 3568
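
Note: each of the five hunks above edits a Git LFS pointer file, not the binary it stands for; only the `oid` (SHA-256 of the new blob) and `size` lines change. A pointer is a fixed three-line `key value` text file, so it can be inspected without LFS installed. A minimal parser sketch (a hypothetical helper, not part of this commit):

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Split a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields

# e.g. parse_lfs_pointer("cached_pipe/unet") ->
# {'version': 'https://git-lfs.github.com/spec/v1',
#  'oid': 'sha256:e600471d0d62f0d13d24a2c2e79e49eae4709d02c8218912029e2b5eda7c457f',
#  'size': '676786352'}
```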
pyproject.toml CHANGED
@@ -11,7 +11,7 @@ dependencies = [
     "diffusers==0.28.2",
     "onediff==1.2.0",
     "onediffx==1.2.0",
-    "oneflow",
+    "accelerate==0.31.0",
     "numpy==1.26.4",
     "xformers==0.0.25.post1",
     "triton==2.2.0",
@@ -20,13 +20,18 @@ dependencies = [
     "omegaconf==2.3.0",
     "torch==2.2.2",
     "torchvision==0.17.2",
-    "huggingface_hub==0.24.7",
-    "setuptools==75.2.0",
+    "huggingface-hub==0.25.2",
     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines",
+    "oneflow",
+    "setuptools>=75.2.0",
+    "bitsandbytes>=0.44.1",
+    "stable-fast",
+    "tomesd>=0.1.3",
 ]
 
 [tool.uv.sources]
 oneflow = { url = "https://github.com/siliconflow/oneflow_releases/releases/download/community_cu118/oneflow-0.9.1.dev20240802%2Bcu118-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" }
+stable-fast = { url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl" }
 
 [project.scripts]
-start_inference = "main:main"
+start_inference = "main:main"
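
Beyond the version bumps (`huggingface_hub` 0.24.7 → `huggingface-hub` 0.25.2, `setuptools` loosened from `==75.2.0` to `>=75.2.0`), the manifest gains `accelerate`, `bitsandbytes`, `stable-fast`, and `tomesd`, and `[tool.uv.sources]` now resolves both `oneflow` and `stable-fast` from direct wheel URLs instead of PyPI. A quick way to list those URL overrides using only the standard library (a sketch; `tomllib` requires Python 3.11+, while this project ships cp310 wheels, where the `tomli` backport is the drop-in substitute):

```python
import tomllib  # Python 3.11+; on 3.10 use: import tomli as tomllib

with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

# Packages uv resolves from a direct URL rather than the PyPI registry
sources = config.get("tool", {}).get("uv", {}).get("sources", {})
for name, source in sources.items():
    print(f"{name}: {source.get('url', source)}")
```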
src/loss.py CHANGED
@@ -42,4 +42,4 @@ class SchedulerWrapper:
 		for E in A:F=torch.cat(C.catch_x[E],dim=0);B.append(F);G=torch.cat(C.catch_e[E],dim=0);D.append(G)
 		H=A[-1];I=torch.cat(C.catch_x_[H],dim=0);B.append(I);A=torch.tensor(A,dtype=torch.int32);B=torch.stack(B);D=torch.stack(D);return A,B,D
 	def load_loss_params(A):B,C,D=torch.load(A.loss_params_path,map_location='cpu');A.loss_model=LossSchedulerModel(C,D);A.loss_scheduler=LossScheduler(B,A.loss_model)
-	def prepare_loss(A,num_accelerate_steps=16):A.load_loss_params()
+	def prepare_loss(A,num_accelerate_steps=15):A.load_loss_params()
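
`src/loss.py` is minified, which hides that the only change here is the default `num_accelerate_steps` dropping from 16 to 15. For readability, a de-minified equivalent of the two methods in this hunk — the variable names are guesses, and `LossSchedulerModel`/`LossScheduler` are the classes defined elsewhere in the same file:

```python
import torch

class SchedulerWrapper:
    # ... capture/stacking methods elided ...

    def load_loss_params(self):
        # loss_params.pth appears to store (scheduler_state, model_arg_a, model_arg_b)
        scheduler_state, arg_a, arg_b = torch.load(self.loss_params_path, map_location="cpu")
        self.loss_model = LossSchedulerModel(arg_a, arg_b)
        self.loss_scheduler = LossScheduler(scheduler_state, self.loss_model)

    def prepare_loss(self, num_accelerate_steps=15):  # default was 16 before this commit
        self.load_loss_params()
```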
src/pipeline.py CHANGED
@@ -1,47 +1,984 @@
 import torch
-from PIL.Image import Image
-from onediffx.deep_cache import StableDiffusionXLPipeline
+from PIL import Image
 from pipelines.models import TextToImageRequest
 from torch import Generator
-import oneflow as flow
-from onediff.infer_compiler import oneflow_compile
-from onediffx import compile_pipe, save_pipe, load_pipe
-from diffusers import DDIMScheduler
+import json
+from diffusers import StableDiffusionXLPipeline, DDIMScheduler
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 from loss import SchedulerWrapper
+from onediffx import compile_pipe, load_pipe
+# Import necessary components
+from transformers import (
+    CLIPImageProcessor,
+    CLIPTextModel,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionModelWithProjection,
+)
+
+
+from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
+from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
+from diffusers.loaders import (
+    FromSingleFileMixin,
+    IPAdapterMixin,
+    StableDiffusionXLLoraLoaderMixin,
+    TextualInversionLoaderMixin,
+)
+from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
+from diffusers.models.attention_processor import (
+    AttnProcessor2_0,
+    FusedAttnProcessor2_0,
+    XFormersAttnProcessor,
+)
+from diffusers.models.lora import adjust_lora_scale_text_encoder
+from diffusers.schedulers import KarrasDiffusionSchedulers
+from diffusers.utils import (
+    USE_PEFT_BACKEND,
+    deprecate,
+    is_invisible_watermark_available,
+    is_torch_xla_available,
+    logging,
+    replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
+)
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
+from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
+
+# Import watermark if available
+if is_invisible_watermark_available():
+    from .watermark import StableDiffusionXLWatermarker
+
+# Check for XLA availability
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+    XLA_AVAILABLE = True
+else:
+    XLA_AVAILABLE = False
+
+logger = logging.get_logger(__name__)
+
+# Constants
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import torch
+        >>> from diffusers import StableDiffusionXLPipeline
+
+        >>> pipe = StableDiffusionXLPipeline.from_pretrained(
+        >>>     "stabilityai/stable-diffusion-xl-base-1.0",
+        >>>     torch_dtype=torch.float16
+        >>> )
+        >>> pipe = pipe.to("cuda")
+
+        >>> prompt = "a photo of an astronaut riding a horse on mars"
+        >>> image = pipe(prompt).images[0]
+        ```
+"""
+
+# Helper functions
+def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
+    """Rescale noise configuration."""
+    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
+    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
+    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
+    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
+    return noise_cfg
+
+# Utils functions
+import numpy as np
+def max_pixel_filter(image: Image) -> Image:
+    try:
+        # Convert the image to a numpy array
+        img_array = np.array(image)
+        # Find the maximum pixel value in the image
+        # max_val = img_array.max()
+        max_val = img_array.min()
+
+        # Reduce the maximum value to 1
+        img_array[img_array == max_val] += 1
+        # Convert the numpy array back to an image
+        filtered_image = Image.fromarray(img_array)
+        return filtered_image
+    except:
+        return image
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accepts_timesteps:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" timestep schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+        if not accept_sigmas:
+            raise ValueError(
+                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                f" sigmas schedules. Please check whether you are using the correct scheduler."
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
+class StableDiffusionXLPipeline_new(
+    DiffusionPipeline,
+    StableDiffusionMixin,
+    FromSingleFileMixin,
+    StableDiffusionXLLoraLoaderMixin,
+    TextualInversionLoaderMixin,
+    IPAdapterMixin,
+):
+
+    model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
+    _optional_components = [
+        "tokenizer",
+        "tokenizer_2",
+        "text_encoder",
+        "text_encoder_2",
+        "image_encoder",
+        "feature_extractor",
+    ]
+    _callback_tensor_inputs = [
+        "latents",
+        "prompt_embeds",
+        "negative_prompt_embeds",
+        "add_text_embeds",
+        "add_time_ids",
+        "negative_pooled_prompt_embeds",
+        "negative_add_time_ids",
+    ]
+
+    def __init__(
+        self,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModel,
+        text_encoder_2: CLIPTextModelWithProjection,
+        tokenizer: CLIPTokenizer,
+        tokenizer_2: CLIPTokenizer,
+        unet: UNet2DConditionModel,
+        scheduler: KarrasDiffusionSchedulers,
+        image_encoder: CLIPVisionModelWithProjection = None,
+        feature_extractor: CLIPImageProcessor = None,
+        force_zeros_for_empty_prompt: bool = True,
+        add_watermarker: Optional[bool] = None,
+    ):
+        super().__init__()
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            text_encoder_2=text_encoder_2,
+            tokenizer=tokenizer,
+            tokenizer_2=tokenizer_2,
+            unet=unet,
+            scheduler=scheduler,
+            image_encoder=image_encoder,
+            feature_extractor=feature_extractor,
+        )
+        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+
+        self.default_sample_size = self.unet.config.sample_size
+
+        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
+
+        if add_watermarker:
+            self.watermark = StableDiffusionXLWatermarker()
+        else:
+            self.watermark = None
+
+    def encode_prompt(
+        self,
+        prompt: str,
+        prompt_2: Optional[str] = None,
+        device: Optional[torch.device] = None,
+        num_images_per_prompt: int = 1,
+        do_classifier_free_guidance: bool = True,
+        negative_prompt: Optional[str] = None,
+        negative_prompt_2: Optional[str] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        lora_scale: Optional[float] = None,
+        clip_skip: Optional[int] = None,
+    ):
+        device = device or self._execution_device
+
+        # set lora scale so that monkey patched LoRA
+        # function of text encoder can correctly access it
+        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
+            self._lora_scale = lora_scale
+
+            # dynamically adjust the LoRA scale
+            if self.text_encoder is not None:
+                if not USE_PEFT_BACKEND:
+                    adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
+                else:
+                    scale_lora_layers(self.text_encoder, lora_scale)
+
+            if self.text_encoder_2 is not None:
+                if not USE_PEFT_BACKEND:
+                    adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
+                else:
+                    scale_lora_layers(self.text_encoder_2, lora_scale)
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        # Define tokenizers and text encoders
+        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
+        text_encoders = (
+            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
+        )
+
+        if prompt_embeds is None:
+            prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
+            # textual inversion: process multi-vector tokens if necessary
+            prompt_embeds_list = []
+            prompts = [prompt, prompt_2]
+            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
+                if isinstance(self, TextualInversionLoaderMixin):
+                    prompt = self.maybe_convert_prompt(prompt, tokenizer)
+
+                text_inputs = tokenizer(
+                    prompt,
+                    padding="max_length",
+                    max_length=tokenizer.model_max_length,
+                    truncation=True,
+                    return_tensors="pt",
+                )
+
+                text_input_ids = text_inputs.input_ids
+                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+                    text_input_ids, untruncated_ids
+                ):
+                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
+                    logger.warning(
+                        "The following part of your input was truncated because CLIP can only handle sequences up to"
+                        f" {tokenizer.model_max_length} tokens: {removed_text}"
+                    )
+
+                prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+
+                # We are only ALWAYS interested in the pooled output of the final text encoder
+                pooled_prompt_embeds = prompt_embeds[0]
+                if clip_skip is None:
+                    prompt_embeds = prompt_embeds.hidden_states[-2]
+                else:
+                    # "2" because SDXL always indexes from the penultimate layer.
+                    prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
+
+                prompt_embeds_list.append(prompt_embeds)
+
+            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
+
+        # get unconditional embeddings for classifier free guidance
+        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
+        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
+            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
+            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
+        elif do_classifier_free_guidance and negative_prompt_embeds is None:
+            negative_prompt = negative_prompt or ""
+            negative_prompt_2 = negative_prompt_2 or negative_prompt
+
+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
+            uncond_tokens: List[str]
+            if prompt is not None and type(prompt) is not type(negative_prompt):
+                raise TypeError(
+                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+                    f" {type(prompt)}."
+                )
+            elif batch_size != len(negative_prompt):
+                raise ValueError(
+                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+                    " the batch size of `prompt`."
+                )
+            else:
+                uncond_tokens = [negative_prompt, negative_prompt_2]
+
+            negative_prompt_embeds_list = []
+            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
+                if isinstance(self, TextualInversionLoaderMixin):
+                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
+
+                max_length = prompt_embeds.shape[1]
+                uncond_input = tokenizer(
+                    negative_prompt,
+                    padding="max_length",
+                    max_length=max_length,
+                    truncation=True,
+                    return_tensors="pt",
+                )
+
+                negative_prompt_embeds = text_encoder(
+                    uncond_input.input_ids.to(device),
+                    output_hidden_states=True,
+                )
+                # We are only ALWAYS interested in the pooled output of the final text encoder
+                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
+                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
+
+                negative_prompt_embeds_list.append(negative_prompt_embeds)
+
+            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
+
+        if self.text_encoder_2 is not None:
+            prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
+        else:
+            prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
+
+        bs_embed, seq_len, _ = prompt_embeds.shape
+        # duplicate text embeddings for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+        if do_classifier_free_guidance:
+            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+            seq_len = negative_prompt_embeds.shape[1]
+
+            if self.text_encoder_2 is not None:
+                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
+            else:
+                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
+
+            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
+            bs_embed * num_images_per_prompt, -1
+        )
+        if do_classifier_free_guidance:
+            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
+                bs_embed * num_images_per_prompt, -1
+            )
+
+        if self.text_encoder is not None:
+            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder, lora_scale)
+
+        if self.text_encoder_2 is not None:
+            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
+                # Retrieve the original scale by scaling back the LoRA layers
+                unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+        dtype = next(self.image_encoder.parameters()).dtype
+
+        if not isinstance(image, torch.Tensor):
+            image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+        image = image.to(device=device, dtype=dtype)
+        if output_hidden_states:
+            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_enc_hidden_states = self.image_encoder(
+                torch.zeros_like(image), output_hidden_states=True
+            ).hidden_states[-2]
+            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                num_images_per_prompt, dim=0
+            )
+            return image_enc_hidden_states, uncond_image_enc_hidden_states
+        else:
+            image_embeds = self.image_encoder(image).image_embeds
+            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+            uncond_image_embeds = torch.zeros_like(image_embeds)
+
+            return image_embeds, uncond_image_embeds
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
+    def prepare_ip_adapter_image_embeds(
+        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
+    ):
+        image_embeds = []
+        if do_classifier_free_guidance:
+            negative_image_embeds = []
+        if ip_adapter_image_embeds is None:
+            if not isinstance(ip_adapter_image, list):
+                ip_adapter_image = [ip_adapter_image]
+
+            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
+                raise ValueError(
+                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
+                )
+
+            for single_ip_adapter_image, image_proj_layer in zip(
+                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
+            ):
+                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
+                single_image_embeds, single_negative_image_embeds = self.encode_image(
+                    single_ip_adapter_image, device, 1, output_hidden_state
+                )
+
+                image_embeds.append(single_image_embeds[None, :])
+                if do_classifier_free_guidance:
+                    negative_image_embeds.append(single_negative_image_embeds[None, :])
+        else:
+            for single_image_embeds in ip_adapter_image_embeds:
+                if do_classifier_free_guidance:
+                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
+                    negative_image_embeds.append(single_negative_image_embeds)
+                image_embeds.append(single_image_embeds)
+
+        ip_adapter_image_embeds = []
+        for i, single_image_embeds in enumerate(image_embeds):
+            single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
+            if do_classifier_free_guidance:
+                single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
+                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
+
+            single_image_embeds = single_image_embeds.to(device=device)
+            ip_adapter_image_embeds.append(single_image_embeds)
+
+        return ip_adapter_image_embeds
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+    def prepare_extra_step_kwargs(self, generator, eta):
+        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+        # and should be between [0, 1]
+
+        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        extra_step_kwargs = {}
+        if accepts_eta:
+            extra_step_kwargs["eta"] = eta
+
+        # check if the scheduler accepts generator
+        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        if accepts_generator:
+            extra_step_kwargs["generator"] = generator
+        return extra_step_kwargs
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
+        shape = (
+            batch_size,
+            num_channels_latents,
+            int(height) // self.vae_scale_factor,
+            int(width) // self.vae_scale_factor,
+        )
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        if latents is None:
+            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+        else:
+            latents = latents.to(device)
+
+        # scale the initial noise by the standard deviation required by the scheduler
+        latents = latents * self.scheduler.init_noise_sigma
+        return latents
+
+    def _get_add_time_ids(
+        self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
+    ):
+        add_time_ids = list(original_size + crops_coords_top_left + target_size)
+
+        passed_add_embed_dim = (
+            self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
+        )
+        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
+
+        if expected_add_embed_dim != passed_add_embed_dim:
+            raise ValueError(
+                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
+            )
+
+        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
+        return add_time_ids
+
+    def upcast_vae(self):
+        dtype = self.vae.dtype
+        self.vae.to(dtype=torch.float32)
+        use_torch_2_0_or_xformers = isinstance(
+            self.vae.decoder.mid_block.attentions[0].processor,
+            (
+                AttnProcessor2_0,
+                XFormersAttnProcessor,
+                FusedAttnProcessor2_0,
+            ),
+        )
+        # if xformers or torch_2_0 is used attention block does not need
+        # to be in float32 which can save lots of memory
+        if use_torch_2_0_or_xformers:
+            self.vae.post_quant_conv.to(dtype)
+            self.vae.decoder.conv_in.to(dtype)
+            self.vae.decoder.mid_block.to(dtype)
+
+    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
+    def get_guidance_scale_embedding(
+        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+    ) -> torch.Tensor:
+        """
+        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
+
+        Args:
+            w (`torch.Tensor`):
+                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
+            embedding_dim (`int`, *optional*, defaults to 512):
+                Dimension of the embeddings to generate.
+            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                Data type of the generated embeddings.
+
+        Returns:
+            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
+        """
+        assert len(w.shape) == 1
+        w = w * 1000.0
+
+        half_dim = embedding_dim // 2
+        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
+        emb = w.to(dtype)[:, None] * emb[None, :]
+        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+        if embedding_dim % 2 == 1:  # zero pad
+            emb = torch.nn.functional.pad(emb, (0, 1))
+        assert emb.shape == (w.shape[0], embedding_dim)
+        return emb
+
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def guidance_rescale(self):
+        return self._guidance_rescale
+
+    @property
+    def clip_skip(self):
+        return self._clip_skip
+
+    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+    # corresponds to doing no classifier free guidance.
+    @property
+    def do_classifier_free_guidance(self):
+        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
+
+    @property
+    def cross_attention_kwargs(self):
+        return self._cross_attention_kwargs
+
+    @property
+    def denoising_end(self):
+        return self._denoising_end
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        prompt_2: Optional[Union[str, List[str]]] = None,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        num_inference_steps: int = 50,
+        timesteps: List[int] = None,
+        sigmas: List[float] = None,
+        denoising_end: Optional[float] = None,
+        guidance_scale: float = 5.0,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        negative_prompt_2: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: Optional[int] = 1,
+        eta: float = 0.0,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
+        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        guidance_rescale: float = 0.0,
+        end_cfg: float = 0.9,
+        original_size: Optional[Tuple[int, int]] = None,
+        crops_coords_top_left: Tuple[int, int] = (0, 0),
+        target_size: Optional[Tuple[int, int]] = None,
+        negative_original_size: Optional[Tuple[int, int]] = None,
+        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
+        negative_target_size: Optional[Tuple[int, int]] = None,
+        clip_skip: Optional[int] = None,
+        callback_on_step_end: Optional[
+            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+        ] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        **kwargs,
+    ):
+        callback = kwargs.pop("callback", None)
+        callback_steps = kwargs.pop("callback_steps", None)
+
+        if callback is not None:
+            deprecate(
+                "callback",
+                "1.0.0",
+                "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
+            )
+        if callback_steps is not None:
+            deprecate(
+                "callback_steps",
+                "1.0.0",
+                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
+            )
+
+        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+        # 0. Default height and width to unet
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+
+        original_size = original_size or (height, width)
+        target_size = target_size or (height, width)
+
+        self._guidance_scale = guidance_scale
+        self._guidance_rescale = guidance_rescale
+        self._clip_skip = clip_skip
+        self._cross_attention_kwargs = cross_attention_kwargs
+        self._denoising_end = denoising_end
+        self._interrupt = False
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+
+        # 3. Encode input prompt
+        lora_scale = (
+            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+        )
+
+        (
+            prompt_embeds,
+            negative_prompt_embeds,
+            pooled_prompt_embeds,
+            negative_pooled_prompt_embeds,
+        ) = self.encode_prompt(
+            prompt=prompt,
+            prompt_2=prompt_2,
+            device=device,
+            num_images_per_prompt=num_images_per_prompt,
+            do_classifier_free_guidance=self.do_classifier_free_guidance,
+            negative_prompt=negative_prompt,
+            negative_prompt_2=negative_prompt_2,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            lora_scale=lora_scale,
+            clip_skip=self.clip_skip,
+        )
+
+        # 4. Prepare timesteps
+        timesteps, num_inference_steps = retrieve_timesteps(
+            self.scheduler, num_inference_steps, device, timesteps, sigmas
+        )
+
+        # 5. Prepare latent variables
+        num_channels_latents = self.unet.config.in_channels
+        latents = self.prepare_latents(
+            batch_size * num_images_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+
+        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+        # 7. Prepare added time ids & embeddings
+        add_text_embeds = pooled_prompt_embeds
+        if self.text_encoder_2 is None:
+            text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
+        else:
+            text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
+
+        add_time_ids = self._get_add_time_ids(
+            original_size,
+            crops_coords_top_left,
+            target_size,
+            dtype=prompt_embeds.dtype,
+            text_encoder_projection_dim=text_encoder_projection_dim,
+        )
+        if negative_original_size is not None and negative_target_size is not None:
+            negative_add_time_ids = self._get_add_time_ids(
+                negative_original_size,
+                negative_crops_coords_top_left,
+                negative_target_size,
+                dtype=prompt_embeds.dtype,
+                text_encoder_projection_dim=text_encoder_projection_dim,
+            )
+        else:
+            negative_add_time_ids = add_time_ids
+
+        if self.do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+            add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
+            add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
+
+        prompt_embeds = prompt_embeds.to(device)
+        add_text_embeds = add_text_embeds.to(device)
+        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
+
+        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+            image_embeds = self.prepare_ip_adapter_image_embeds(
+                ip_adapter_image,
+                ip_adapter_image_embeds,
+                device,
+                batch_size * num_images_per_prompt,
+                self.do_classifier_free_guidance,
+            )
+
+        # 8. Denoising loop
+        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+
+        # 8.1 Apply denoising_end
+        if (
+            self.denoising_end is not None
+            and isinstance(self.denoising_end, float)
+            and self.denoising_end > 0
+            and self.denoising_end < 1
+        ):
+            discrete_timestep_cutoff = int(
+                round(
+                    self.scheduler.config.num_train_timesteps
+                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
+                )
+            )
+            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
+            timesteps = timesteps[:num_inference_steps]
+
+        # 9. Optionally get Guidance Scale Embedding
+        timestep_cond = None
+        if self.unet.config.time_cond_proj_dim is not None:
+            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+            timestep_cond = self.get_guidance_scale_embedding(
+                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+            ).to(device=device, dtype=latents.dtype)
+
+        self._num_timesteps = len(timesteps)
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            do_classifier_free_guidance = self.do_classifier_free_guidance
+            for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+                if end_cfg is not None and i / num_inference_steps > end_cfg and do_classifier_free_guidance:
+                    do_classifier_free_guidance = False
+                    prompt_embeds = 1.5*torch.chunk(prompt_embeds, 2, dim=0)[-1]
+                    add_text_embeds = 1.5*torch.chunk(add_text_embeds, 2, dim=0)[-1]
+                    add_time_ids = 1.25*torch.chunk(add_time_ids, 2, dim=0)[-1]
+                # expand the latents if we are doing classifier free guidance
+                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                # predict the noise residual
+                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+                if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+                    added_cond_kwargs["image_embeds"] = image_embeds
+                noise_pred = self.unet(
+                    latent_model_input,
+                    t,
+                    encoder_hidden_states=prompt_embeds,
+                    timestep_cond=timestep_cond,
+                    cross_attention_kwargs=self.cross_attention_kwargs,
+                    added_cond_kwargs=added_cond_kwargs,
+                    return_dict=False,
+                )[0]
+
+                # perform guidance
+                if do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                if do_classifier_free_guidance and self.guidance_rescale > 0.0:
+                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents_dtype = latents.dtype
+                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+                if latents.dtype != latents_dtype:
+                    if torch.backends.mps.is_available():
+                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                        latents = latents.to(latents_dtype)
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
+                    negative_pooled_prompt_embeds = callback_outputs.pop(
+                        "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+                    )
+                    add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
+                    negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+                    if callback is not None and i % callback_steps == 0:
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)
+
+                if XLA_AVAILABLE:
+                    xm.mark_step()
+
+        if not output_type == "latent":
+            # make sure the VAE is in float32 mode, as it overflows in float16
+            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+
+            if needs_upcasting:
+                self.upcast_vae()
+                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+            elif latents.dtype != self.vae.dtype:
+                if torch.backends.mps.is_available():
+                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                    self.vae = self.vae.to(latents.dtype)
+
+            # unscale/denormalize the latents
+            # denormalize with the mean and std if available and not None
+            has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
+            has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
+            if has_latents_mean and has_latents_std:
+                latents_mean = (
+                    torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
+                )
+                latents_std = (
+                    torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
+                )
+                latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
+            else:
+                latents = latents / self.vae.config.scaling_factor
+
+            image = self.vae.decode(latents, return_dict=False)[0]
+
+            # cast back to fp16 if needed
+            if needs_upcasting:
+                self.vae.to(dtype=torch.float16)
+        else:
+            image = latents
+
+        if not output_type == "latent":
+            # apply watermark if available
+            if self.watermark is not None:
+                image = self.watermark.apply_watermark(image)
+
+            image = self.image_processor.postprocess(image, output_type=output_type)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image,)
+
+        return StableDiffusionXLPipelineOutput(images=image)
 
 def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
+    """Load and prepare the pipeline."""
     if not pipeline:
-        pipeline = StableDiffusionXLPipeline.from_pretrained(
+        pipeline = StableDiffusionXLPipeline_new.from_pretrained(
             "./models/newdream-sdxl-20",
             torch_dtype=torch.float16,
             local_files_only=True,
-        )
-        pipeline.to("cuda")
+        ).to("cuda")
+
     pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
     pipeline = compile_pipe(pipeline)
-    pipeline.unet = oneflow_compile(pipeline.unet)
+    load_pipe(pipeline, dir="cached_pipe")
 
-    load_pipe(pipeline,dir="cached_pipe")
+    # Warm-up runs
     for _ in range(4):
-        deepcache_output = pipeline(prompt="make submissions great again", cache_interval=1, cache_layer_id=0, cache_block_id=0, num_inference_steps=20)
+        pipeline(
+            prompt="a cute Halloween ghost couple",
+            num_inference_steps=13
+        )
     pipeline.scheduler.prepare_loss()
     return pipeline
 
-def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
-    if request.seed is None:
-        generator = None
-    else:
-        generator = Generator(pipeline.device).manual_seed(request.seed)
-
-    return pipeline(
+def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
+    """Generate image from text prompt."""
+    generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed else None
+
+    image_0 = pipeline(
         prompt=request.prompt,
         negative_prompt=request.negative_prompt,
         width=request.width,
         height=request.height,
         generator=generator,
-        num_inference_steps=15,
-        cache_interval=1,
-        cache_layer_id=0,
-        cache_block_id=0,
+        num_inference_steps=13,
     ).images[0]
 
+    filter_image = max_pixel_filter(image_0)
+    return filter_image
+
+
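
The rewrite drops the DeepCache-based `onediffx.deep_cache` pipeline in favor of a vendored copy of diffusers' SDXL pipeline (`StableDiffusionXLPipeline_new`). Its substantive deviations from stock diffusers: (1) a new `end_cfg` argument — once `i / num_inference_steps` exceeds 0.9, classifier-free guidance is switched off, the negative half of the embedding batch is dropped (with ad-hoc 1.5×/1.25× rescaling of the surviving embeddings), so the last ~10% of UNet calls run at batch size 1 instead of 2; (2) inference drops from 15 to 13 steps, with four warm-up calls so the onediffx-compiled graph is traced before serving; and (3) `max_pixel_filter`, which despite its name increments the *minimum* pixel value by 1 (the `img_array.max()` line is commented out). A self-contained sketch of the `end_cfg` batch-halving idea only, with a toy `unet` and invented tensor shapes standing in for the real model:

```python
import torch

def toy_unet(latents: torch.Tensor, embeds: torch.Tensor) -> torch.Tensor:
    # Stand-in for the real noise prediction; shapes are illustrative only.
    return latents * 0.5 + embeds.mean() * 0.0

num_steps, end_cfg, guidance_scale = 13, 0.9, 5.0
latents = torch.randn(1, 4, 8, 8)
cond = torch.randn(1, 77, 64)
embeds = torch.cat([torch.zeros_like(cond), cond])  # [uncond, cond]
use_cfg = True

for i in range(num_steps):
    if use_cfg and i / num_steps > end_cfg:
        use_cfg = False
        embeds = embeds.chunk(2)[-1]  # keep only the conditional half
    batch = torch.cat([latents] * 2) if use_cfg else latents  # batch 2 -> 1
    noise = toy_unet(batch, embeds)
    if use_cfg:
        uncond, text = noise.chunk(2)
        noise = uncond + guidance_scale * (text - uncond)
    latents = latents - 0.1 * noise  # placeholder for scheduler.step()
```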
uv.lock CHANGED
@@ -34,6 +34,19 @@ version = "4.9.3"
 source = { registry = "https://pypi.org/simple" }
 sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034 }
 
+[[package]]
+name = "bitsandbytes"
+version = "0.44.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "numpy" },
+    { name = "torch" },
+]
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e4/e6/ccb84da7ffaf208a71c2c3c8e1120b34759df640db959660be9a98505eb4/bitsandbytes-0.44.1-py3-none-manylinux_2_24_x86_64.whl", hash = "sha256:b2f24c6cbf11fc8c5d69b3dcecee9f7011451ec59d6ac833e873c9f105259668", size = 122419627 },
+    { url = "https://files.pythonhosted.org/packages/5f/f5/11bddebb5addc0a005b0c1cecc6e4c6e4055ad7b860bdcbf6374e12a51f5/bitsandbytes-0.44.1-py3-none-win_amd64.whl", hash = "sha256:8e68e12aa25d2cf9a1730ad72890a5d1a19daa23f459a6a4679331f353d58cb4", size = 121451331 },
+]
+
 [[package]]
 name = "certifi"
 version = "2024.8.30"
@@ -101,6 +114,7 @@ version = "6"
 source = { editable = "." }
 dependencies = [
     { name = "accelerate" },
+    { name = "bitsandbytes" },
     { name = "diffusers" },
     { name = "edge-maxxing-pipelines" },
     { name = "huggingface-hub" },
@@ -110,6 +124,8 @@ dependencies = [
     { name = "onediffx" },
     { name = "oneflow" },
     { name = "setuptools" },
+    { name = "stable-fast" },
+    { name = "tomesd" },
     { name = "torch" },
     { name = "torchvision" },
     { name = "transformers" },
@@ -120,15 +136,18 @@ dependencies = [
 [package.metadata]
 requires-dist = [
     { name = "accelerate", specifier = "==0.31.0" },
+    { name = "bitsandbytes", specifier = ">=0.44.1" },
     { name = "diffusers", specifier = "==0.28.2" },
     { name = "edge-maxxing-pipelines", git = "https://github.com/womboai/edge-maxxing?subdirectory=pipelines" },
-    { name = "huggingface-hub", specifier = "==0.24.7" },
+    { name = "huggingface-hub", specifier = "==0.25.2" },
     { name = "numpy", specifier = "==1.26.4" },
     { name = "omegaconf", specifier = "==2.3.0" },
     { name = "onediff", specifier = "==1.2.0" },
     { name = "onediffx", specifier = "==1.2.0" },
     { name = "oneflow", url = "https://github.com/siliconflow/oneflow_releases/releases/download/community_cu118/oneflow-0.9.1.dev20240802%2Bcu118-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" },
-    { name = "setuptools", specifier = "==75.2.0" },
+    { name = "setuptools", specifier = ">=75.2.0" },
+    { name = "stable-fast", url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl" },
+    { name = "tomesd", specifier = ">=0.1.3" },
     { name = "torch", specifier = "==2.2.2" },
     { name = "torchvision", specifier = "==0.17.2" },
     { name = "transformers", specifier = "==4.41.2" },
@@ -164,7 +183,7 @@ wheels = [
 
 [[package]]
 name = "huggingface-hub"
-version = "0.24.7"
+version = "0.25.2"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "filelock" },
@@ -175,9 +194,9 @@ dependencies = [
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/af/33/d252098a3b8d910065ad09cf318efb5dbe6c8bb586269bdfb47b7e021020/huggingface_hub-0.24.7.tar.gz", hash = "sha256:0ad8fb756e2831da0ac0491175b960f341fe06ebcf80ed6f8728313f95fc0207", size = 349211 }
+sdist = { url = "https://files.pythonhosted.org/packages/df/fd/5f81bae67096c5ab50d29a0230b8374f0245916cca192f8ee2fada51f4f6/huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c", size = 365806 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/57/28/a0b0dd3cca63908045edc300360d6cd8758d4d86eee3fd2b08f00c5a41c4/huggingface_hub-0.24.7-py3-none-any.whl", hash = "sha256:a212c555324c8a7b1ffdd07266bb7e7d69ca71aa238d27b7842d65e9a26ac3e5", size = 417514 },
+    { url = "https://files.pythonhosted.org/packages/64/09/a535946bf2dc88e61341f39dc507530411bb3ea4eac493e5ec833e8f35bd/huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25", size = 436575 },
 ]
 
 [[package]]
@@ -758,6 +777,31 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/31/2d/90165d51ecd38f9a02c6832198c13a4e48652485e2ccf863ebb942c531b6/setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8", size = 1249825 },
 ]
 
+[[package]]
+name = "stable-fast"
+version = "1.0.5+torch222cu121"
+source = { url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl" }
+dependencies = [
+    { name = "torch" },
+]
+wheels = [
+    { url = "https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:03d193666c52016096ecfdf0e8e2183fb5aa8ea51e99d5132b353a1e9a6c1264" },
+]
+
+[package.metadata]
+requires-dist = [
+    { name = "diffusers", marker = "extra == 'diffusers'", specifier = ">=0.19.0" },
+    { name = "numpy", marker = "extra == 'dev'" },
+    { name = "opencv-python", marker = "extra == 'dev'" },
+    { name = "pillow", marker = "extra == 'dev'" },
+    { name = "prettytable", marker = "extra == 'dev'" },
+    { name = "pytest", marker = "extra == 'dev'" },
+    { name = "torch" },
+    { name = "transformers", marker = "extra == 'diffusers'" },
+    { name = "triton", marker = "extra == 'triton'", specifier = ">=2.1.0" },
+    { name = "xformers", marker = "extra == 'xformers'", specifier = ">=0.0.20" },
+]
+
 [[package]]
 name = "sympy"
 version = "1.13.3"
@@ -800,6 +844,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/45/b6/36c1bb106bbe96012c9367df89ed01599cada036c0b96d38fbbdbeb75c9f/tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75", size = 9945103 },
 ]
 
+[[package]]
+name = "tomesd"
+version = "0.1.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "torch" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/29/37/ed74c7449fe5a8a4726be3dab4d879d000babf6ea538658171933b1f000e/tomesd-0.1.3.tar.gz", hash = "sha256:15bba2e952f4643c8355951e892fda918ddccbdff2238dc368d42bd078fcedc9", size = 14032 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/0c/02/367c67c8f510313f143a7818e92254a5f861c7d94c98ad6a08d25db52fee/tomesd-0.1.3-py3-none-any.whl", hash = "sha256:3d5aa0857fe2c2aab253891050601ca13a87d8d7a99b6760b9ca0856aa0c6355", size = 11467 },
+]
+
 [[package]]
 name = "torch"
 version = "2.2.2"
@@ -932,4 +988,4 @@ source = { registry = "https://pypi.org/simple" }
 sdist = { url = "https://files.pythonhosted.org/packages/54/bf/5c0000c44ebc80123ecbdddba1f5dcd94a5ada602a9c225d84b5aaa55e86/zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29", size = 24199 }
 wheels = [
     { url = "https://files.pythonhosted.org/packages/62/8b/5ba542fa83c90e09eac972fc9baca7a88e7e7ca4b221a89251954019308b/zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350", size = 9200 },
-]
+]
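
`uv.lock` mirrors the manifest changes: `huggingface-hub` moves to 0.25.2, and new locked entries appear for `bitsandbytes` and `tomesd` (registry) plus `stable-fast`, which is pinned to a GitHub release wheel by URL and sha256. Because URL sources bypass the registry, the recorded hash is what guarantees integrity; checking a downloaded wheel against the lock entry needs only the standard library (a sketch; the local file name is illustrative):

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream a file through SHA-256 in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hash recorded for the stable-fast wheel in the lock entry above
EXPECTED = "03d193666c52016096ecfdf0e8e2183fb5aa8ea51e99d5132b353a1e9a6c1264"
wheel = "stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl"
assert sha256_of(wheel) == EXPECTED, "sha256 mismatch: wheel does not match uv.lock"
```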