kevinlu4588
committed on
Commit
·
9985d49
1
Parent(s):
04578d3
Setting up esd code
Browse files- Attack_code/Inpainting/inpainting_diffuser.py +612 -0
- Attack_code/Inpainting/inpainting_diffusion.ipynb +0 -0
- Attack_code/Noisy Diffusion(Eta attack)/clilp_utils.py +366 -0
- Attack_code/Noisy Diffusion(Eta attack)/clip_gen_tests.ipynb +0 -0
- Attack_code/Noisy Diffusion(Eta attack)/eta_diffusion.py +661 -0
- Attack_code/Noisy Diffusion(Eta attack)/gradient_asc_clip.ipynb +0 -0
- ESD Training Scripts/esd_diffusers.py +560 -0
- Pipfile +19 -0
- Pipfile.lock +981 -0
- README.md +48 -1
- esd-vangogh_from_vangogh-noxattn_1-epochs_200.pt → models/ESD-U/esd-vangogh_from_vangogh-noxattn_1-epochs_200.pt +0 -0
- esd-picasso_from_picasso-xattn_1-epochs_200.pt → models/ESD-X/esd-picasso_from_picasso-xattn_1-epochs_200.pt +0 -0
Attack_code/Inpainting/inpainting_diffuser.py
ADDED
|
@@ -0,0 +1,612 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
from matplotlib import pyplot as plt
|
| 3 |
+
import textwrap
|
| 4 |
+
import argparse
|
| 5 |
+
import torch
|
| 6 |
+
import copy
|
| 7 |
+
import os
|
| 8 |
+
import re
|
| 9 |
+
import numpy as np
|
| 10 |
+
from diffusers import AutoencoderKL, UNet2DConditionModel
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from tqdm.auto import tqdm
|
| 13 |
+
from transformers import CLIPTextModel, CLIPTokenizer, CLIPFeatureExtractor
|
| 14 |
+
from diffusers.schedulers import EulerAncestralDiscreteScheduler
|
| 15 |
+
from eta_diffusers.src.diffusers.schedulers.eta_ddim_scheduler import DDIMScheduler
|
| 16 |
+
from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
|
| 17 |
+
from diffusers.schedulers.scheduling_lms_discrete import LMSDiscreteScheduler
|
| 18 |
+
# from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
| 19 |
+
|
| 20 |
+
def show_image_grid(img_files, num_rows=3, num_cols=4, fig_size=(15, 10)):
    """Display a list of images in a ``num_rows`` x ``num_cols`` grid.

    Images are placed row-major; cells beyond ``len(img_files)`` are left
    blank.  Fix: axes are now hidden for empty cells too — previously only
    cells that received an image had their axes turned off, so trailing
    cells showed bare tick marks.

    Args:
        img_files: sequence of images accepted by ``plt.imshow`` (PIL images
            or arrays).
        num_rows / num_cols: grid dimensions.
        fig_size: matplotlib figure size in inches.
    """
    fig, axes = plt.subplots(num_rows, num_cols, figsize=fig_size)

    for idx, ax in enumerate(axes.flatten()):
        if idx < len(img_files):
            ax.imshow(img_files[idx])
        # Hide axes unconditionally so empty cells are truly blank.
        ax.axis('off')

    plt.tight_layout()
    plt.show()
|
| 34 |
+
|
| 35 |
+
# Example usage
|
| 36 |
+
# img_files = [image1, image2, image3, ...] # Replace with actual images
|
| 37 |
+
# show_image_grid(img_files)
|
| 38 |
+
|
| 39 |
+
def to_gif(images, path):
    """Write ``images`` (a non-empty list of PIL images) to ``path`` as a
    looping animated GIF; per-frame duration grows with frame count."""
    first_frame, *remaining_frames = images
    first_frame.save(
        path,
        save_all=True,
        append_images=remaining_frames,
        loop=0,
        duration=len(images) * 20,
    )
|
| 43 |
+
|
| 44 |
+
def figure_to_image(figure):
    """Rasterize a matplotlib figure into a PIL RGB image at 300 dpi."""
    figure.set_dpi(300)
    figure.canvas.draw()

    # NOTE(review): canvas.tostring_rgb() was removed in newer matplotlib
    # releases (replaced by buffer_rgba) — confirm the pinned version.
    width_height = figure.canvas.get_width_height()
    raw_rgb = figure.canvas.tostring_rgb()
    return Image.frombytes('RGB', width_height, raw_rgb)
|
| 51 |
+
|
| 52 |
+
def image_grid(images, outpath=None, column_titles=None, row_titles=None):
    """Render a 2-D list of images (rows of columns) as a tight grid.

    If ``outpath`` is given, save the grid there and return ``None``;
    otherwise return the rendered figure as a PIL image.
    Optional ``column_titles`` label the first row, ``row_titles`` the
    first column.
    """
    n_rows = len(images)
    n_cols = len(images[0])

    fig, axs = plt.subplots(nrows=n_rows, ncols=n_cols,
                            figsize=(n_cols, n_rows), squeeze=False)

    for r in range(n_rows):
        for c in range(n_cols):
            ax = axs[r][c]
            ax.imshow(images[r][c])
            if column_titles and r == 0:
                wrapped = textwrap.fill(column_titles[c], width=12)
                ax.set_title(wrapped, fontsize='x-small')
            if row_titles and c == 0:
                ax.set_ylabel(row_titles[r], rotation=0, fontsize='x-small',
                              labelpad=1.6 * len(row_titles[r]))
            ax.set_xticks([])
            ax.set_yticks([])

    plt.subplots_adjust(wspace=0, hspace=0)

    if outpath is not None:
        plt.savefig(outpath, bbox_inches='tight', dpi=300)
        plt.close()
    else:
        plt.tight_layout(pad=0)
        rendered = figure_to_image(plt.gcf())
        plt.close()
        return rendered
|
| 83 |
+
|
| 84 |
+
def get_module(module, module_name):
    """Resolve a dotted attribute path on ``module``.

    ``module_name`` may be a dotted string (``"unet.conv_in"``) or an
    already-split sequence of attribute names.  An empty path returns
    ``module`` itself.  Implemented iteratively rather than recursively.
    """
    if isinstance(module_name, str):
        parts = module_name.split('.')
    else:
        parts = list(module_name)

    target = module
    for part in parts:
        target = getattr(target, part)
    return target
|
| 94 |
+
|
| 95 |
+
def set_module(module, module_name, new_module):
    """Replace the attribute at dotted path ``module_name`` with ``new_module``.

    ``module_name`` may be a dotted string or a pre-split sequence of
    attribute names.  Walks to the parent iteratively, then assigns.
    """
    if isinstance(module_name, str):
        parts = module_name.split('.')
    else:
        parts = list(module_name)

    parent = module
    for part in parts[:-1]:
        parent = getattr(parent, part)
    setattr(parent, parts[-1], new_module)
|
| 105 |
+
|
| 106 |
+
def freeze(module):
    """Disable gradient tracking on every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False
|
| 111 |
+
|
| 112 |
+
def unfreeze(module):
    """Enable gradient tracking on every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = True
|
| 117 |
+
|
| 118 |
+
def get_concat_h(im1, im2):
    """Concatenate two PIL images horizontally (im1 left, im2 right).

    The canvas height is ``im1.height``; if ``im2`` is taller it is cropped
    by the paste, if shorter the gap stays black.
    """
    canvas = Image.new('RGB', (im1.width + im2.width, im1.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (im1.width, 0))
    return canvas
|
| 123 |
+
|
| 124 |
+
def get_concat_v(im1, im2):
    """Concatenate two PIL images vertically (im1 on top, im2 below).

    The canvas width is ``im1.width``; a wider ``im2`` is cropped by the
    paste, a narrower one leaves the gap black.
    """
    canvas = Image.new('RGB', (im1.width, im1.height + im2.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (0, im1.height))
    return canvas
|
| 129 |
+
|
| 130 |
+
class StableDiffuser(torch.nn.Module):
    """Self-contained Stable Diffusion v1.4 pipeline (VAE + CLIP text encoder
    + denoising UNet) with helpers for text-to-image generation and latent
    inpainting.

    Fixes relative to the original:
    - duplicate method definitions removed (the later copies silently
      overrode the earlier, debug-print variants, which were dead code);
    - debug ``print`` statements removed;
    - ``encode`` uses ``vae.config.scaling_factor`` (consistent with
      ``decode``) instead of the hard-coded 0.18215;
    - ``diffusion`` honours ``guidance_scale`` (previously swallowed by
      ``**kwargs``), guards ``mask_tensor is None``, and returns
      ``(latents_steps, noise_preds)`` so two-value unpacking by callers
      such as ``train`` works;
    - ``__call__`` builds latents at the latent resolution
      (``img_size // 8``), not the pixel resolution.
    """

    def __init__(self, scheduler='DDIM'):
        """Load SD v1.4 components; ``scheduler`` is 'LMS', 'DDIM' or 'DDPM'."""
        super().__init__()

        # Autoencoder mapping between pixel space and latent space.
        self.vae = AutoencoderKL.from_pretrained(
            "CompVis/stable-diffusion-v1-4", subfolder="vae")

        # Tokenizer and text encoder for prompt conditioning.
        self.tokenizer = CLIPTokenizer.from_pretrained(
            "openai/clip-vit-large-patch14")
        self.text_encoder = CLIPTextModel.from_pretrained(
            "openai/clip-vit-large-patch14")

        # Denoising UNet operating in latent space.
        self.unet = UNet2DConditionModel.from_pretrained(
            "CompVis/stable-diffusion-v1-4", subfolder="unet")

        self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
            "CompVis/stable-diffusion-v1-4", subfolder="feature_extractor")
        # self.safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="safety_checker")

        if scheduler == 'LMS':
            self.scheduler = LMSDiscreteScheduler(
                beta_start=0.00085, beta_end=0.012,
                beta_schedule="scaled_linear", num_train_timesteps=1000)
        elif scheduler == 'DDIM':
            # NOTE: this is the project's patched, eta-aware DDIM scheduler
            # (imported from eta_diffusers at the top of the file).
            self.scheduler = DDIMScheduler.from_pretrained(
                "CompVis/stable-diffusion-v1-4", subfolder="scheduler")
        elif scheduler == 'DDPM':
            self.scheduler = DDPMScheduler.from_pretrained(
                "CompVis/stable-diffusion-v1-4", subfolder="scheduler")

        self.eval()

    def get_noise(self, batch_size, latent_width, latent_height, generator=None):
        """Sample standard-normal latents on the model's device and dtype."""
        param = next(self.parameters())
        return torch.randn(
            (batch_size, self.unet.config.in_channels, latent_width, latent_height),
            generator=generator).type(param.dtype).to(param.device)

    def add_noise(self, latents, noise, step):
        """Forward-noise ``latents`` to the timestep at scheduler index ``step``."""
        timestep = torch.tensor([self.scheduler.timesteps[step]])
        return self.scheduler.add_noise(latents, noise, timestep)

    def text_tokenize(self, prompts):
        """Tokenize ``prompts`` padded/truncated to the model's max length."""
        return self.tokenizer(
            prompts, padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True, return_tensors="pt")

    def text_detokenize(self, tokens):
        """Decode token ids back to strings, skipping one reserved id.

        NOTE(review): assumes ``vocab_size - 1`` is the pad/eos id — confirm
        against the tokenizer in use.
        """
        return [self.tokenizer.decode(token) for token in tokens
                if token != self.tokenizer.vocab_size - 1]

    def text_encode(self, tokens):
        """Run the CLIP text encoder; returns the last hidden state."""
        return self.text_encoder(tokens.input_ids.to(self.unet.device))[0]

    def decode(self, latents):
        """Decode scaled latents into image tensors (roughly in [-1, 1])."""
        return self.vae.decode(1 / self.vae.config.scaling_factor * latents).sample

    def encode(self, tensors):
        """Encode image tensors into scaled latents (mode of the posterior).

        Consistency fix: uses the configured scaling factor instead of the
        hard-coded 0.18215 (same value for SD v1.4, but kept in one place).
        """
        return self.vae.encode(tensors).latent_dist.mode() * self.vae.config.scaling_factor

    def to_image(self, image):
        """Convert decoded image tensors in [-1, 1] to a list of PIL images."""
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
        images = (image * 255).round().astype("uint8")
        return [Image.fromarray(img) for img in images]

    def set_scheduler_timesteps(self, n_steps):
        """Configure the scheduler for an ``n_steps`` denoising schedule."""
        self.scheduler.set_timesteps(n_steps, device=self.unet.device)

    def get_initial_latents(self, n_imgs, latent_width, latent_height,
                            n_prompts=1, generator=None):
        """Initial noise latents, repeated per prompt and scaled by the
        scheduler's ``init_noise_sigma``.

        ``n_prompts`` now defaults to 1 (backward-compatible) for callers
        that omit it.
        """
        noise = self.get_noise(
            n_imgs, latent_width, latent_height,
            generator=generator).repeat(n_prompts, 1, 1, 1)
        return noise * self.scheduler.init_noise_sigma

    def get_text_embeddings(self, prompts, n_imgs):
        """Classifier-free-guidance embeddings: [unconditional; conditional]
        concatenated along the batch axis, repeated ``n_imgs`` times."""
        text_tokens = self.text_tokenize(prompts)
        text_embeddings = self.text_encode(text_tokens)

        unconditional_tokens = self.text_tokenize([""] * len(prompts))
        unconditional_embeddings = self.text_encode(unconditional_tokens)

        return torch.cat(
            [unconditional_embeddings, text_embeddings]
        ).repeat_interleave(n_imgs, dim=0)

    def predict_noise(self, iteration, latents, text_embeddings, guidance_scale=7.5):
        """One UNet forward pass with classifier-free guidance at scheduler
        index ``iteration``."""
        # Duplicate latents so one UNet pass covers both the unconditional
        # and conditional halves of text_embeddings.
        latents = torch.cat([latents] * 2)
        latents = self.scheduler.scale_model_input(
            latents, self.scheduler.timesteps[iteration])

        noise_prediction = self.unet(
            latents, self.scheduler.timesteps[iteration],
            encoder_hidden_states=text_embeddings).sample

        # Classifier-free guidance combination.
        noise_prediction_uncond, noise_prediction_text = noise_prediction.chunk(2)
        return noise_prediction_uncond + guidance_scale * (
            noise_prediction_text - noise_prediction_uncond)

    @torch.no_grad()
    def diffusion(self, latents, text_embeddings, end_iteration=1000,
                  start_iteration=0, mask_tensor=None, show_progress=True,
                  guidance_scale=7.5, **kwargs):
        """Run the denoising loop from ``start_iteration`` to ``end_iteration``.

        When ``mask_tensor`` is given, only latent positions where the mask
        equals 1 are updated (inpainting); otherwise the whole latent is
        stepped.  Fix: the original indexed with ``mask_tensor == 1`` even
        when the mask was None, and silently dropped ``guidance_scale``
        into ``**kwargs``.

        Returns:
            (latents_steps, noise_preds): per-iteration latent snapshots and
            the corresponding guided noise predictions.
        """
        latents_steps = []
        noise_preds = []

        steps = range(start_iteration, end_iteration)
        iterator = tqdm(steps) if show_progress else steps

        for iteration in iterator:
            noise_pred = self.predict_noise(
                iteration, latents, text_embeddings,
                guidance_scale=guidance_scale)
            step_output = self.scheduler.step(
                noise_pred, self.scheduler.timesteps[iteration], latents)
            if mask_tensor is None:
                latents = step_output.prev_sample
            else:
                # Update only the masked (to-be-inpainted) region.
                latents[mask_tensor == 1] = step_output.prev_sample[mask_tensor == 1]
            noise_preds.append(noise_pred)
            latents_steps.append(latents.clone())

        return latents_steps, noise_preds

    @torch.no_grad()
    def inpaint(self, img_tensor, mask_tensor, prompts, n_steps=50, n_imgs=1,
                show_progress=True, **kwargs):
        """Inpaint the masked region of ``img_tensor`` guided by ``prompts``.

        ``mask_tensor`` must be broadcastable over the latent tensor; entries
        equal to 1 mark the region to resynthesize.
        """
        assert 0 <= n_steps <= 1000
        assert len(prompts) == n_imgs, "Number of prompts must match number of images"

        self.set_scheduler_timesteps(n_steps)

        latents = self.encode(img_tensor.to(self.vae.device))

        # Replace masked latent positions with fresh noise so the diffusion
        # loop regenerates them; the rest keeps the encoded source image.
        masked_latents = latents.clone()
        initial_latents = self.get_initial_latents(
            n_imgs, latents.shape[2], latents.shape[3], len(prompts))
        masked_latents[mask_tensor == 1] = initial_latents[mask_tensor == 1]

        text_embeddings = self.get_text_embeddings(prompts, n_imgs)

        latents_steps, _ = self.diffusion(
            masked_latents, text_embeddings, end_iteration=n_steps,
            mask_tensor=mask_tensor, show_progress=show_progress, **kwargs)

        torch.cuda.empty_cache()

        # Decode only the final latent into PIL images.
        inpainted_image_tensor = latents_steps[-1].to(self.unet.device)
        return self.to_image(self.decode(inpainted_image_tensor))

    @torch.no_grad()
    def __call__(self, prompts, img_size=512, n_steps=50, n_imgs=1,
                 end_iteration=None, generator=None, eta=1.0,
                 variance_scale=1.0, **kwargs):
        """Generate images from ``prompts``.

        Returns a 6-tuple:
            (final_steps, np_images_steps, decoded_latents_steps,
             raw_latents_steps, noise_preds, output_steps)
        where ``final_steps`` is per-image tuples of PIL images across steps
        and ``output_steps`` is kept as an empty placeholder for interface
        stability (the safety-checker path that produced it is disabled).
        """
        assert 0 <= n_steps <= 1000

        if not isinstance(prompts, list):
            prompts = [prompts]

        self.set_scheduler_timesteps(n_steps)
        # Fix: SD v1.x latents live at 1/8 the pixel resolution.
        latent_size = img_size // 8
        latents = self.get_initial_latents(
            n_imgs, latent_size, latent_size, len(prompts), generator=generator)
        text_embeddings = self.get_text_embeddings(prompts, n_imgs)
        end_iteration = end_iteration or n_steps

        # eta / variance_scale are forwarded for the patched eta-aware DDIM
        # scheduler; schedulers that do not use them ignore the kwargs.
        latents_steps, noise_preds = self.diffusion(
            latents, text_embeddings, end_iteration=end_iteration,
            eta=eta, variance_scale=variance_scale, **kwargs)

        returned_latents = latents_steps
        latents_steps = [self.decode(step_latents.to(self.unet.device))
                         for step_latents in latents_steps]
        images_steps = [self.to_image(step_latents)
                        for step_latents in latents_steps]

        np_images_steps = np.array(images_steps)
        # Transpose [step][image] -> [image][step].
        final_steps = list(zip(*images_steps))

        output_steps = []

        return (final_steps, np_images_steps, latents_steps,
                returned_latents, noise_preds, output_steps)
|
| 404 |
+
|
| 405 |
+
class FineTunedModel(torch.nn.Module):
    """Context manager that hot-swaps trainable copies of selected UNet
    layers into a frozen model.

    On ``__enter__`` the fine-tuned copies replace the originals inside
    ``model``; on ``__exit__`` the originals are restored.  Which layers are
    copied is controlled by ``train_method``:

    - ``xattn``: all cross-attention (``attn2``) Linear/Conv layers
    - ``xattn-strict``: only the ``to_q`` / ``to_k`` projections of ``attn2``
    - ``noxattn``: everything except cross-attention
    - ``selfattn``: only self-attention (``attn1``) layers

    Bug fixed: the original ``xattn-strict`` test was
    ``'attn2' not in name or 'to_q' not in name or 'to_k' not in name``,
    which is always True (a name cannot contain both ``to_q`` and ``to_k``),
    so no module was ever selected.
    """

    def __init__(self, model, train_method):
        super().__init__()

        self.model = model
        self.ft_modules = {}
        self.orig_modules = {}

        # The base model is never trained directly.
        freeze(self.model)

        for module_name, module in model.named_modules():
            if 'unet' not in module_name:
                continue
            if module.__class__.__name__ in ["Linear", "Conv2d", "LoRACompatibleLinear", "LoRACompatibleConv"]:
                if train_method == 'xattn':
                    if 'attn2' not in module_name:
                        continue
                elif train_method == 'xattn-strict':
                    # Keep only the q/k projections of cross-attention.
                    if 'attn2' not in module_name or (
                            'to_q' not in module_name and 'to_k' not in module_name):
                        continue
                elif train_method == 'noxattn':
                    if 'attn2' in module_name:
                        continue
                elif train_method == 'selfattn':
                    if 'attn1' not in module_name:
                        continue
                else:
                    raise NotImplementedError(
                        f"train_method: {train_method} is not implemented."
                    )

                # Trainable deep copy; the original stays frozen in place.
                ft_module = copy.deepcopy(module)

                self.orig_modules[module_name] = module
                self.ft_modules[module_name] = ft_module

                unfreeze(ft_module)

        # Register both sets so .to(device) / .state_dict() see them.
        self.ft_modules_list = torch.nn.ModuleList(self.ft_modules.values())
        self.orig_modules_list = torch.nn.ModuleList(self.orig_modules.values())

    @classmethod
    def from_checkpoint(cls, model, checkpoint, train_method):
        """Build a FineTunedModel and load weights from ``checkpoint``
        (a path or an already-loaded state dict)."""
        if isinstance(checkpoint, str):
            checkpoint = torch.load(checkpoint)

        ftm = FineTunedModel(model, train_method=train_method)
        ftm.load_state_dict(checkpoint)

        return ftm

    def __enter__(self):
        # Swap the fine-tuned copies into the wrapped model.
        for key, ft_module in self.ft_modules.items():
            set_module(self.model, key, ft_module)

    def __exit__(self, exc_type, exc_value, tb):
        # Restore the original (frozen) modules.
        for key, module in self.orig_modules.items():
            set_module(self.model, key, module)

    def parameters(self):
        """Trainable parameters: only those of the fine-tuned copies."""
        parameters = []
        for ft_module in self.ft_modules.values():
            parameters.extend(list(ft_module.parameters()))
        return parameters

    def state_dict(self):
        """Nested state dict keyed by module path (matches load_state_dict)."""
        return {key: module.state_dict() for key, module in self.ft_modules.items()}

    def load_state_dict(self, state_dict):
        """Load a nested state dict produced by ``state_dict``."""
        for key, sd in state_dict.items():
            self.ft_modules[key].load_state_dict(sd)
|
| 497 |
+
def train(erase_concept, erase_from, train_method, iterations, negative_guidance, lr, save_path):
    """ESD training loop: fine-tune selected UNet modules so that
    ``erase_concept`` is steered away from (erased), optionally while
    conditioning on ``erase_from`` prompts.

    Args:
        erase_concept: comma-separated concepts to erase.
        erase_from: comma-separated source concepts; length must match
            ``erase_concept`` or be 1 (then it is broadcast).
        train_method: module-selection mode passed to FineTunedModel
            ('xattn', 'xattn-strict', 'noxattn', 'selfattn').
        iterations: number of optimization steps.
        negative_guidance: strength of the erasure objective.
        lr: Adam learning rate.
        save_path: where the fine-tuned state dict is written.
    """
    nsteps = 50

    diffuser = StableDiffuser(scheduler='DDIM').to('cuda')
    diffuser.train()

    finetuner = FineTunedModel(diffuser, train_method=train_method)

    optimizer = torch.optim.Adam(finetuner.parameters(), lr=lr)
    criteria = torch.nn.MSELoss()

    pbar = tqdm(range(iterations))
    erase_concept = erase_concept.split(',')
    erase_concept = [a.strip() for a in erase_concept]

    erase_from = erase_from.split(',')
    erase_from = [a.strip() for a in erase_from]

    # Broadcast a single erase_from over all concepts; otherwise require a
    # one-to-one pairing.
    if len(erase_from)!=len(erase_concept):
        if len(erase_from) == 1:
            c = erase_from[0]
            erase_from = [c for _ in erase_concept]
        else:
            print(erase_from, erase_concept)
            raise Exception("Erase from concepts length need to match erase concepts length")

    # Pair each concept with its source: [[concept, from], ...].
    erase_concept_ = []
    for e, f in zip(erase_concept, erase_from):
        erase_concept_.append([e,f])

    erase_concept = erase_concept_

    print(erase_concept)

    # del diffuser.vae
    # del diffuser.text_encoder
    # del diffuser.tokenizer

    torch.cuda.empty_cache()

    for i in pbar:
        with torch.no_grad():
            # Sample one (concept, from) pair per step.
            index = np.random.choice(len(erase_concept), 1, replace=False)[0]
            erase_concept_sampled = erase_concept[index]

            neutral_text_embeddings = diffuser.get_text_embeddings([''],n_imgs=1)
            positive_text_embeddings = diffuser.get_text_embeddings([erase_concept_sampled[0]],n_imgs=1)
            target_text_embeddings = diffuser.get_text_embeddings([erase_concept_sampled[1]],n_imgs=1)

            diffuser.set_scheduler_timesteps(nsteps)

            optimizer.zero_grad()

            # Random partial-denoising depth in [1, nsteps-1).
            iteration = torch.randint(1, nsteps - 1, (1,)).item()

            # NOTE(review): get_initial_latents is defined as
            # (n_imgs, latent_width, latent_height, n_prompts); this call
            # looks like the older (n_imgs, img_size, n_prompts) signature —
            # for 512px images the latent should be 64x64. Confirm.
            latents = diffuser.get_initial_latents(1, 512, 1)

            with finetuner:
                # NOTE(review): this two-value unpacking assumes diffusion
                # returns a pair; the diffusion defined in this file returns
                # a single list of latents. Confirm the intended contract.
                latents_steps, _ = diffuser.diffusion(
                    latents,
                    positive_text_embeddings,
                    start_iteration=0,
                    end_iteration=iteration,
                    guidance_scale=3,
                    show_progress=False
                )

            # Rescale the step index onto the full 1000-step schedule so
            # predict_noise indexes a comparable timestep.
            diffuser.set_scheduler_timesteps(1000)

            iteration = int(iteration / nsteps * 1000)

            # NOTE(review): latents_steps[0] is the FIRST recorded step;
            # the most recent latent would be latents_steps[-1]. Confirm.
            positive_latents = diffuser.predict_noise(iteration, latents_steps[0], positive_text_embeddings, guidance_scale=1)
            neutral_latents = diffuser.predict_noise(iteration, latents_steps[0], neutral_text_embeddings, guidance_scale=1)
            target_latents = diffuser.predict_noise(iteration, latents_steps[0], target_text_embeddings, guidance_scale=1)
            # Erasing a concept "from itself" targets the neutral prediction.
            if erase_concept_sampled[0] == erase_concept_sampled[1]:
                target_latents = neutral_latents.clone().detach()

        # Only the fine-tuned modules' prediction carries gradients.
        with finetuner:
            negative_latents = diffuser.predict_noise(iteration, latents_steps[0], target_text_embeddings, guidance_scale=1)

        positive_latents.requires_grad = False
        neutral_latents.requires_grad = False

        # ESD objective: push the fine-tuned prediction away from the erased
        # concept's direction.
        loss = criteria(negative_latents, target_latents - (negative_guidance*(positive_latents - neutral_latents))) #loss = criteria(e_n, e_0) works the best try 5000 epochs

        loss.backward()
        optimizer.step()

    torch.save(finetuner.state_dict(), save_path)

    del diffuser, loss, optimizer, finetuner, negative_latents, neutral_latents, positive_latents, latents_steps, latents

    torch.cuda.empty_cache()
|
| 599 |
+
|
| 600 |
+
if __name__ == '__main__':
    # Demo entry point: load an ESD (noxattn) checkpoint and generate with
    # the erased model swapped in.
    model_path='ESD_Models/car_noxattn_200.pt'
    state_dict = torch.load(model_path)
    diffuser = StableDiffuser(scheduler='DDIM').to('cuda')
    finetuner = FineTunedModel(diffuser, train_method='noxattn')

    finetuner.load_state_dict(state_dict)

    #generation loop: the finetuned modules are active only inside the
    # context manager.
    all_images = []
    with finetuner:
        # NOTE(review): diffuser.__call__ unpacks four values from
        # self.diffusion, which returns a single list in this file — confirm
        # the returned structure before indexing images[0][0].
        images = diffuser('image of a car', n_steps=50, generator=torch.manual_seed(2440), eta=1.0)
    plt.imshow(images[0][0])
|
Attack_code/Inpainting/inpainting_diffusion.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Attack_code/Noisy Diffusion(Eta attack)/clilp_utils.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
import requests
|
| 3 |
+
import os, glob
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import numpy as np
|
| 6 |
+
import re
|
| 7 |
+
from transformers import CLIPProcessor, CLIPModel
|
| 8 |
+
import importlib
|
| 9 |
+
import torch
|
| 10 |
+
# Make changes to esd_diffusers.py file here
|
| 11 |
+
from eta_diffusion import FineTunedModel, StableDiffuser
|
| 12 |
+
|
| 13 |
+
class ExperimentImageSet:
    """Container for one prompt's generated images plus their metadata.

    Holds the base Stable Diffusion output, the finetuned (eta=0) output,
    the eta-attack outputs, and — optionally — two interference outputs
    with the prompts that produced them.
    """

    def __init__(self, stable_diffusion, eta_0_image, attack_images, interference_images = None, prompt: str = None, interference_prompt1 = None, interference_prompt2 = None, seed: int = None):
        # Prompt/seed metadata needed to reproduce the generations.
        self.target_prompt = prompt
        self.seed = seed
        self.interference_prompt1 = interference_prompt1
        self.interference_prompt2 = interference_prompt2
        # Generated image stacks (per-step arrays from the diffuser).
        self.stable_diffusion: np.ndarray = stable_diffusion
        self.eta_0_image: np.ndarray = eta_0_image
        self.attack_images: np.ndarray = attack_images
        self.interference_images: np.ndarray = interference_images
|
| 23 |
+
|
| 24 |
+
def erased_gen(target_csv_path, target_model_path, train_method, etas, num_prompts, n_steps=50):
    """Generate evaluation images for an erased (finetuned) diffusion model.

    For each of the first `num_prompts` rows of the target CSV this runs:
      1. the base Stable Diffusion model (eta=0, no attack),
      2. the finetuned (erased) model with no attack,
      3. the finetuned model under the eta attack, once per value in `etas`.

    Args:
        target_csv_path: CSV with 'prompt' and 'evaluation_seed' columns.
        target_model_path: path to the finetuner state dict (.pt).
        train_method: ESD training method the finetuner was trained with.
        etas: iterable of eta values for the attack generations.
        num_prompts: number of CSV rows (prompts) to evaluate.
        n_steps: diffusion steps per generation (default 50, matching the
            previous hard-coded behavior).

    Returns:
        (fixed_images, total_experiment_sets): NumPy array of final-step
        images, and one ExperimentImageSet per prompt.
    """
    target_data = pd.read_csv(target_csv_path)

    torch.cuda.empty_cache()
    variance_scales = [1.0]

    total_images = []
    total_experiment_sets = []
    ct = 0  # counts diffusion runs, for progress reporting only

    # Build the diffuser and load the erased-model weights into the finetuner.
    state_dict = torch.load(target_model_path)
    diffuser = StableDiffuser(scheduler='DDIM').to('cuda')
    finetuner = FineTunedModel(diffuser, train_method=train_method)
    finetuner.load_state_dict(state_dict)

    for index, row in target_data.head(num_prompts).iterrows():
        prompt = row['prompt']
        seed = int(row['evaluation_seed'])

        # Base stable diffusion image (finetuner not active).
        stable_diffusion, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
            prompt,
            n_steps=n_steps,
            generator=torch.manual_seed(seed),
            eta=0.0,
            variance_scale=0.0
        )
        total_images.append(stable_diffusion)

        # Everything below runs against the finetuned (erased) model.
        # NOTE(review): the source paste lost indentation — confirm the eta
        # attack generations were intended inside the finetuner context
        # (attacking the erased model is the purpose of the eta attack).
        with finetuner:
            # Finetuned model, no attack (eta = 0 baseline).
            finetuned_no_attack, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
                prompt,
                n_steps=n_steps,
                generator=torch.manual_seed(seed),
                eta=0.0,
                variance_scale=0.0
            )
            total_images.append(finetuned_no_attack)

            # Eta-attack sweep over the requested eta / variance values.
            attack_images = []
            for eta in etas:
                for variance_scale in variance_scales:
                    eta_image, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
                        prompt,
                        n_steps=n_steps,
                        generator=torch.manual_seed(seed),
                        eta=eta,
                        variance_scale=variance_scale
                    )
                    attack_images.append(eta_image)
            total_images.extend(attack_images)

        # Bundle the prompt's generations into one experiment record.
        experiment_set = ExperimentImageSet(
            stable_diffusion=stable_diffusion,
            eta_0_image=finetuned_no_attack,
            attack_images=np.array(attack_images),
            interference_images=None,  # no interference prompts in this variant
            prompt=prompt,
            seed=seed,
            interference_prompt1=None,
            interference_prompt2=None
        )
        total_experiment_sets.append(experiment_set)

        ct += 1 + len(etas)
        print(f"diffusion-count {ct} for prompt: {prompt}")

    total_images = np.array(total_images)

    # Keep only the final denoising step of each generation.
    # (Was image[0][49], which silently assumed exactly 50 steps.)
    fixed_images = np.array([image[0][-1] for image in total_images])

    print("Image grid shape:", fixed_images.shape)

    return fixed_images, total_experiment_sets
|
| 111 |
+
|
| 112 |
+
from transformers import CLIPModel, CLIPProcessor
|
| 113 |
+
import torch
|
| 114 |
+
import numpy as np
|
| 115 |
+
|
| 116 |
+
from transformers import CLIPModel, CLIPProcessor
|
| 117 |
+
import torch
|
| 118 |
+
import numpy as np
|
| 119 |
+
|
| 120 |
+
def process_images(model, processor, prompt: str, images: list):
    """Score a list of diffusion outputs against `prompt` with CLIP.

    Each entry of `images` is a per-step image stack; only the final
    denoising step of each generation is scored.

    Args:
        model: a loaded CLIPModel.
        processor: the matching CLIPProcessor.
        prompt: text the images are scored against.
        images: list of per-step image stacks (one per generation).

    Returns:
        List of CLIP image-text logits, one float per image.
    """
    images = np.array(images)
    images = images.squeeze()
    # Keep only the last step of each generation.
    # (Was image[49], which silently assumed exactly 50 steps; image[-1] is
    # identical for 50-step runs and robust to other step counts.)
    images = [image[-1] for image in images]
    inputs = processor(text=[prompt], images=images, return_tensors="pt", padding=True)
    outputs = model(**inputs)
    return [clip_score.item() for clip_score in outputs.logits_per_image]
|
| 129 |
+
|
| 130 |
+
def calculate_experiment_scores(experiment, model, processor):
    """Compute CLIP scores for one ExperimentImageSet.

    Scores the base SD image, the finetuned (eta=0) image and the best
    attack image against the target prompt; when interference images are
    present, each is scored against its own interference prompt.

    Args:
        experiment: an ExperimentImageSet.
        model: a loaded CLIPModel.
        processor: the matching CLIPProcessor.

    Returns:
        dict with keys 'SD', 'ETA_0', 'ATTACK' and, when interference
        images exist, 'INTERFERENCE1' / 'INTERFERENCE2'.
    """
    targeted_images = [experiment.stable_diffusion, experiment.eta_0_image]
    targeted_images.extend(experiment.attack_images)
    clip_scores = process_images(model, processor, experiment.target_prompt, targeted_images)

    scores = {
        'SD': clip_scores[0],      # Stable diffusion image score
        'ETA_0': clip_scores[1],   # Finetuned no-attack image score
        'ATTACK': max(clip_scores[2:]),  # Best (highest) attack score
    }

    # Explicit None check: interference_images may be a NumPy array, whose
    # bare truth value raises "ambiguous" instead of evaluating.
    if experiment.interference_images is not None:
        interference_images = np.array(experiment.interference_images)
        interference_images = interference_images.squeeze()
        # Keep only the final denoising step of each interference run
        # (was [49], which assumed exactly 50 steps).
        interference_images = [interference_image[-1] for interference_image in interference_images]

        inputs = processor(text=[experiment.interference_prompt1], images=interference_images[0], return_tensors="pt", padding=True)
        outputs = model(**inputs)
        scores['INTERFERENCE1'] = outputs.logits_per_image.item()

        inputs = processor(text=[experiment.interference_prompt2], images=interference_images[1], return_tensors="pt", padding=True)
        outputs = model(**inputs)
        scores['INTERFERENCE2'] = outputs.logits_per_image.item()

    return scores
|
| 158 |
+
|
| 159 |
+
def get_clip_scores(experiment_sets: list['ExperimentImageSet']):
    """Compute mean CLIP scores across a list of experiments.

    Loads openai/clip-vit-base-patch32 once, scores every experiment via
    `calculate_experiment_scores`, and averages each score key over all
    experiments.

    Args:
        experiment_sets: list of ExperimentImageSet objects.

    Returns:
        dict of mean scores keyed 'SD', 'ETA_0', 'ATTACK',
        'INTERFERENCE1', 'INTERFERENCE2'. Returns {} for an empty input
        (previously this raised ZeroDivisionError).
    """
    # Guard before the (expensive) model download.
    if not experiment_sets:
        return {}

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    total_clip_scores = {'SD': 0, 'ETA_0': 0, 'ATTACK': 0, 'INTERFERENCE1': 0, 'INTERFERENCE2': 0}
    experiment_count = len(experiment_sets)

    for experiment in experiment_sets:
        experiment_scores = calculate_experiment_scores(experiment, model, processor)
        for key in total_clip_scores:
            # Missing keys (no interference images) contribute 0.
            total_clip_scores[key] += experiment_scores.get(key, 0)

    # Mean over all experiments, per key.
    return {key: score / experiment_count for key, score in total_clip_scores.items()}
|
| 175 |
+
|
| 176 |
+
def get_simple_clip_scores(images_list, prompts):
    """CLIP-score each image set against its corresponding prompt.

    Args:
        images_list (list of lists): images_list[i] holds the images for
            prompts[i].
        prompts (list of str): prompts, parallel to images_list.

    Returns:
        list: CLIP image-text logits rounded to 2 decimals, flattened in
        processing order. (The previous docstring claimed a mean score;
        the function has always returned the full list.)
    """
    # Guard before the (expensive) model download.
    if not images_list:
        return []

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    full_clip_set = []
    for images, prompt in zip(images_list, prompts):
        inputs = processor(text=[prompt], images=images, return_tensors="pt", padding=True)
        outputs = model(**inputs)
        clip_scores = [clip_score.item() for clip_score in outputs.logits_per_image]
        full_clip_set.extend(np.round(clip_scores, 2))

    return full_clip_set
|
| 201 |
+
|
| 202 |
+
import matplotlib.pyplot as plt
|
| 203 |
+
import matplotlib.pyplot as plt
|
| 204 |
+
|
| 205 |
+
def show_image_grid_with_scores(img_files, subtitles=None, clip_scores=None, num_rows=3, num_cols=4, fig_size=(15, 10)):
    """Display a grid of images with labels and optional CLIP scores.

    Args:
        img_files (list of np.ndarray): images to display, row-major.
        subtitles (list of str): labels, one per image. When omitted, a
            default SD/Finetuned/ETA label pattern is used.
        clip_scores (list of float): optional CLIP score appended to each
            subtitle.
        num_rows (int): number of rows in the grid.
        num_cols (int): number of columns in the grid.
        fig_size (tuple): matplotlib figure size.
    """
    fig, axes = plt.subplots(num_rows, num_cols, figsize=fig_size)

    # Only fall back to default labels when the caller supplied none.
    # (BUG FIX: the previous else-branch overwrote caller-provided
    # subtitles with the default 6-label pattern.)
    if subtitles is None:
        default_labels = ['SD', 'Finetuned', 'ETA', "ETA", "ETA", 'eta']
        if clip_scores:
            subtitles = default_labels * (len(clip_scores) // 6)
        else:
            subtitles = default_labels

    # Plot each image in the grid row-wise.
    for i, ax in enumerate(axes.flatten()):
        img_index = i  # row-major order
        if img_index < len(img_files):
            ax.imshow(img_files[img_index])

            # Construct subtitle with label and optional CLIP score.
            if subtitles and img_index < len(subtitles):
                subtitle = subtitles[img_index]
                if clip_scores and img_index < len(clip_scores):
                    subtitle += f" CLIP: {clip_scores[img_index]:.3f}"
                ax.set_title(subtitle, fontsize=14)

        ax.axis('off')  # Turn off axis labels

    plt.tight_layout()
    plt.show()
|
| 241 |
+
|
| 242 |
+
# Example usage
|
| 243 |
+
# erased_images = [image1, image2, image3, ...] # Replace with actual images
|
| 244 |
+
# subtitles = ["Original", "Finetuner no attack", "Eta Attack", ...] # Replace with actual subtitles
|
| 245 |
+
# clip_scores = [0.85, 0.92, 0.75, ...] # Replace with actual CLIP scores
|
| 246 |
+
# show_image_grid_with_scores(erased_images, subtitles=subtitles, clip_scores=clip_scores, num_rows=2, num_cols=6)
|
| 247 |
+
|
| 248 |
+
def interference_gen(target_csv_path, interference_path1, interference_path2, target_model_path, train_method, etas, num_prompts, n_steps=50):
    """Generate evaluation and interference images for an erased model.

    Like `erased_gen`, but additionally generates two interference images
    per prompt (prompts/seeds taken from the two interference CSVs) to
    check whether the erasure bleeds into unrelated concepts.

    Args:
        target_csv_path: CSV with 'prompt' and 'evaluation_seed' columns
            for the erased concept.
        interference_path1 / interference_path2: CSVs with prompts and
            seeds for unrelated concepts.
        target_model_path: path to the finetuner state dict (.pt).
        train_method: ESD training method the finetuner was trained with.
        etas: iterable of eta values for the attack generations.
        num_prompts: number of CSV rows (prompts) to evaluate.
        n_steps: diffusion steps per generation (default 50, matching the
            previous hard-coded behavior).

    Returns:
        (fixed_images, total_experiment_sets): NumPy array of final-step
        images, and one ExperimentImageSet per prompt.
    """
    target_data = pd.read_csv(target_csv_path)
    interference_data1 = pd.read_csv(interference_path1)
    interference_data2 = pd.read_csv(interference_path2)

    torch.cuda.empty_cache()
    variance_scales = [1.0]

    total_images = []
    total_experiment_sets = []
    ct = 0  # counts diffusion runs, for progress reporting only

    # Build the diffuser and load the erased-model weights into the finetuner.
    state_dict = torch.load(target_model_path)
    diffuser = StableDiffuser(scheduler='DDIM').to('cuda')
    finetuner = FineTunedModel(diffuser, train_method=train_method)
    finetuner.load_state_dict(state_dict)

    # Walk the three CSVs in lockstep (truncated to num_prompts rows each).
    for (index, row), (index1, row1), (index2, row2) in zip(
        target_data.head(num_prompts).iterrows(),
        interference_data1.head(num_prompts).iterrows(),
        interference_data2.head(num_prompts).iterrows()
    ):
        prompt = row['prompt']
        seed = int(row['evaluation_seed'])

        interference_prompt1 = row1['prompt']
        interference_seed1 = int(row1['evaluation_seed'])

        interference_prompt2 = row2['prompt']
        interference_seed2 = int(row2['evaluation_seed'])

        # Base stable diffusion image (finetuner not active).
        stable_diffusion, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
            prompt,
            n_steps=n_steps,
            generator=torch.manual_seed(seed),
            eta=0.0,
            variance_scale=0.0
        )
        total_images.append(stable_diffusion)

        # Everything below runs against the finetuned (erased) model.
        # NOTE(review): the source paste lost indentation — confirm the
        # attack and interference runs were intended inside the finetuner
        # context (interference measures specificity of the erased model).
        with finetuner:
            # Finetuned model, no attack (eta = 0 baseline).
            finetuned_no_attack, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
                prompt,
                n_steps=n_steps,
                generator=torch.manual_seed(seed),
                eta=0.0,
                variance_scale=0.0
            )
            total_images.append(finetuned_no_attack)

            # Eta-attack sweep over the requested eta / variance values.
            attack_images = []
            for eta in etas:
                for variance_scale in variance_scales:
                    eta_image, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
                        prompt,
                        n_steps=n_steps,
                        generator=torch.manual_seed(seed),
                        eta=eta,
                        variance_scale=variance_scale
                    )
                    attack_images.append(eta_image)
            total_images.extend(attack_images)

            # Interference images: unrelated prompts, no attack (eta = 0).
            interference_image1, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
                interference_prompt1,
                n_steps=n_steps,
                generator=torch.manual_seed(interference_seed1),
                eta=0.0,
                variance_scale=0.0
            )
            total_images.append(interference_image1)

            interference_image2, images_steps, decoded_latents, latents, noise_preds, output_steps = diffuser(
                interference_prompt2,
                n_steps=n_steps,
                generator=torch.manual_seed(interference_seed2),
                eta=0.0,
                variance_scale=0.0
            )
            total_images.append(interference_image2)

        # Bundle the prompt's generations, including interference outputs.
        experiment_set = ExperimentImageSet(
            stable_diffusion=stable_diffusion,
            eta_0_image=finetuned_no_attack,
            attack_images=np.array(attack_images),
            interference_images=[interference_image1, interference_image2],
            prompt=prompt,
            seed=seed,
            interference_prompt1=interference_prompt1,
            interference_prompt2=interference_prompt2
        )
        total_experiment_sets.append(experiment_set)

        ct += 1 + len(etas)
        print(f"diffusion-count {ct} for prompt: {prompt}")

    total_images = np.array(total_images)

    # Keep only the final denoising step of each generation.
    # (Was image[0][49], which silently assumed exactly 50 steps.)
    fixed_images = np.array([image[0][-1] for image in total_images])

    print("Image grid shape:", fixed_images.shape)

    return fixed_images, total_experiment_sets
|
Attack_code/Noisy Diffusion(Eta attack)/clip_gen_tests.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Attack_code/Noisy Diffusion(Eta attack)/eta_diffusion.py
ADDED
|
@@ -0,0 +1,661 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
from matplotlib import pyplot as plt
|
| 3 |
+
import textwrap
|
| 4 |
+
import argparse
|
| 5 |
+
import torch
|
| 6 |
+
import copy
|
| 7 |
+
import os
|
| 8 |
+
import re
|
| 9 |
+
import numpy as np
|
| 10 |
+
from diffusers import AutoencoderKL, UNet2DConditionModel
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from tqdm.auto import tqdm
|
| 13 |
+
from transformers import CLIPTextModel, CLIPTokenizer, CLIPFeatureExtractor
|
| 14 |
+
from diffusers.schedulers import EulerAncestralDiscreteScheduler
|
| 15 |
+
from eta_diffusers.src.diffusers.schedulers.eta_ddim_scheduler import DDIMScheduler
|
| 16 |
+
from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
|
| 17 |
+
from diffusers.schedulers.scheduling_lms_discrete import LMSDiscreteScheduler
|
| 18 |
+
# from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
| 19 |
+
|
| 20 |
+
def show_image_grid(img_files, num_rows=3, num_cols=4, fig_size=(15, 10)):
    """Render `img_files` as a row-major grid of matplotlib subplots."""
    fig, axes = plt.subplots(num_rows, num_cols, figsize=fig_size)

    # Fill the grid row by row; surplus cells are simply left empty.
    for position, axis in enumerate(axes.flatten()):
        if position < len(img_files):
            axis.imshow(img_files[position])
            axis.axis('off')  # hide ticks/frame around the picture

    plt.tight_layout()
    plt.show()
|
| 34 |
+
|
| 35 |
+
# Example usage
|
| 36 |
+
# img_files = [image1, image2, image3, ...] # Replace with actual images
|
| 37 |
+
# show_image_grid(img_files)
|
| 38 |
+
|
| 39 |
+
def to_gif(images, path):
    """Write a sequence of PIL images to `path` as a looping GIF."""
    first_frame, remaining_frames = images[0], images[1:]
    first_frame.save(
        path,
        save_all=True,
        append_images=remaining_frames,
        loop=0,
        duration=len(images) * 20,
    )
|
| 43 |
+
|
| 44 |
+
def figure_to_image(figure):
    """Rasterize a matplotlib figure into a PIL RGB image."""
    figure.set_dpi(300)
    figure.canvas.draw()
    canvas_size = figure.canvas.get_width_height()
    # NOTE(review): canvas.tostring_rgb was removed in matplotlib >= 3.8;
    # this preserves the original call and so assumes an older matplotlib.
    return Image.frombytes('RGB', canvas_size, figure.canvas.tostring_rgb())
|
| 51 |
+
|
| 52 |
+
def image_grid(images, outpath=None, column_titles=None, row_titles=None):
    """Lay out a 2-D list of images as a tight matplotlib grid.

    When `outpath` is given the grid is saved there and nothing is
    returned; otherwise the rendered grid is returned as a PIL image.
    """
    n_rows = len(images)
    n_cols = len(images[0])

    fig, axs = plt.subplots(nrows=n_rows, ncols=n_cols,
                            figsize=(n_cols, n_rows), squeeze=False)

    for r, row_images in enumerate(images):
        for c, img in enumerate(row_images):
            cell = axs[r][c]
            cell.imshow(img)
            # Column headers on the top row only.
            if column_titles and r == 0:
                cell.set_title(textwrap.fill(column_titles[c], width=12),
                               fontsize='x-small')
            # Row labels on the leftmost column only.
            if row_titles and c == 0:
                cell.set_ylabel(row_titles[r], rotation=0, fontsize='x-small',
                                labelpad=1.6 * len(row_titles[r]))
            cell.set_xticks([])
            cell.set_yticks([])

    plt.subplots_adjust(wspace=0, hspace=0)

    if outpath is not None:
        plt.savefig(outpath, bbox_inches='tight', dpi=300)
        plt.close()
        return None

    plt.tight_layout(pad=0)
    rendered = figure_to_image(plt.gcf())
    plt.close()
    return rendered
|
| 83 |
+
|
| 84 |
+
def get_module(module, module_name):
    """Resolve a dotted attribute path (e.g. "unet.conv_in") on `module`.

    `module_name` may be a dotted string or an already-split list of
    attribute names; an empty path returns `module` itself.
    """
    if isinstance(module_name, str):
        module_name = module_name.split('.')

    # Consume one path component per recursive step.
    if not module_name:
        return module
    child = getattr(module, module_name[0])
    return get_module(child, module_name[1:])
|
| 94 |
+
|
| 95 |
+
def set_module(module, module_name, new_module):
    """Replace the attribute at a dotted path on `module` with `new_module`.

    `module_name` may be a dotted string or an already-split list of
    attribute names. Returns None (like setattr).
    """
    if isinstance(module_name, str):
        module_name = module_name.split('.')

    # Recurse down to the parent of the final component, then assign.
    if len(module_name) == 1:
        setattr(module, module_name[0], new_module)
        return None
    parent = getattr(module, module_name[0])
    return set_module(parent, module_name[1:], new_module)
|
| 105 |
+
|
| 106 |
+
def freeze(module):
    """Disable gradient tracking on every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False
|
| 111 |
+
|
| 112 |
+
def unfreeze(module):
    """Enable gradient tracking on every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = True
|
| 117 |
+
|
| 118 |
+
def get_concat_h(im1, im2):
    """Return a new RGB image with im1 and im2 pasted side by side.

    The canvas height is taken from im1.
    """
    canvas = Image.new('RGB', (im1.width + im2.width, im1.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (im1.width, 0))
    return canvas
|
| 123 |
+
|
| 124 |
+
def get_concat_v(im1, im2):
    """Return a new RGB image with im1 stacked above im2.

    The canvas width is taken from im1.
    """
    canvas = Image.new('RGB', (im1.width, im1.height + im2.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (0, im1.height))
    return canvas
|
| 129 |
+
|
| 130 |
+
class StableDiffuser(torch.nn.Module):
|
| 131 |
+
|
| 132 |
+
def __init__(self,
|
| 133 |
+
scheduler='DDIM'
|
| 134 |
+
):
|
| 135 |
+
print('code changed')
|
| 136 |
+
|
| 137 |
+
super().__init__()
|
| 138 |
+
|
| 139 |
+
# Load the autoencoder model which will be used to decode the latents into image space.
|
| 140 |
+
self.vae = AutoencoderKL.from_pretrained(
|
| 141 |
+
"CompVis/stable-diffusion-v1-4", subfolder="vae")
|
| 142 |
+
print(self.vae.config.scaling_factor )
|
| 143 |
+
# Load the tokenizer and text encoder to tokenize and encode the text.
|
| 144 |
+
self.tokenizer = CLIPTokenizer.from_pretrained(
|
| 145 |
+
"openai/clip-vit-large-patch14")
|
| 146 |
+
self.text_encoder = CLIPTextModel.from_pretrained(
|
| 147 |
+
"openai/clip-vit-large-patch14")
|
| 148 |
+
|
| 149 |
+
# The UNet model for generating the latents.
|
| 150 |
+
self.unet = UNet2DConditionModel.from_pretrained(
|
| 151 |
+
"CompVis/stable-diffusion-v1-4", subfolder="unet")
|
| 152 |
+
|
| 153 |
+
self.feature_extractor = CLIPFeatureExtractor.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="feature_extractor")
|
| 154 |
+
# self.safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="safety_checker")
|
| 155 |
+
|
| 156 |
+
if scheduler == 'LMS':
|
| 157 |
+
self.scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
| 158 |
+
elif scheduler == 'DDIM':
|
| 159 |
+
self.scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
|
| 160 |
+
elif scheduler == 'DDPM':
|
| 161 |
+
self.scheduler = DDPMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
|
| 162 |
+
|
| 163 |
+
self.eval()
|
| 164 |
+
|
| 165 |
+
def get_noise(self, batch_size, img_size, generator=None):
|
| 166 |
+
|
| 167 |
+
param = list(self.parameters())[0]
|
| 168 |
+
|
| 169 |
+
return torch.randn(
|
| 170 |
+
(batch_size, self.unet.in_channels, img_size // 8, img_size // 8),
|
| 171 |
+
generator=generator).type(param.dtype).to(param.device)
|
| 172 |
+
|
| 173 |
+
def add_noise(self, latents, noise, step):
|
| 174 |
+
|
| 175 |
+
return self.scheduler.add_noise(latents, noise, torch.tensor([self.scheduler.timesteps[step]]))
|
| 176 |
+
|
| 177 |
+
def text_tokenize(self, prompts):
|
| 178 |
+
tokens = self.tokenizer(prompts, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
| 179 |
+
print("prompts", prompts)
|
| 180 |
+
print("tokens", tokens)
|
| 181 |
+
return tokens
|
| 182 |
+
|
| 183 |
+
def text_detokenize(self, tokens):
|
| 184 |
+
|
| 185 |
+
return [self.tokenizer.decode(token) for token in tokens if token != self.tokenizer.vocab_size - 1]
|
| 186 |
+
|
| 187 |
+
def text_encode(self, tokens):
|
| 188 |
+
|
| 189 |
+
return self.text_encoder(tokens.input_ids.to(self.unet.device))[0]
|
| 190 |
+
|
| 191 |
+
def decode(self, latents):
|
| 192 |
+
print(self.vae.config.scaling_factor)
|
| 193 |
+
print(latents)
|
| 194 |
+
print(1 / self.vae.config.scaling_factor * latents)
|
| 195 |
+
print(self.vae.decode(1 / self.vae.config.scaling_factor * latents))
|
| 196 |
+
print(self.vae.decode(1 / self.vae.config.scaling_factor * latents).sample)
|
| 197 |
+
return self.vae.decode(1 / self.vae.config.scaling_factor * latents).sample
|
| 198 |
+
|
| 199 |
+
def encode(self, tensors):
|
| 200 |
+
|
| 201 |
+
return self.vae.encode(tensors).latent_dist.mode() * 0.18215
|
| 202 |
+
|
| 203 |
+
def to_image(self, image):
|
| 204 |
+
|
| 205 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 206 |
+
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
|
| 207 |
+
print(image)
|
| 208 |
+
images = (image * 255).round().astype("uint8")
|
| 209 |
+
pil_images = [Image.fromarray(image) for image in images]
|
| 210 |
+
|
| 211 |
+
return pil_images
|
| 212 |
+
|
| 213 |
+
def set_scheduler_timesteps(self, n_steps):
|
| 214 |
+
self.scheduler.set_timesteps(n_steps, device=self.unet.device)
|
| 215 |
+
|
| 216 |
+
def get_initial_latents(self, n_imgs, img_size, n_prompts, generator=None):
|
| 217 |
+
|
| 218 |
+
noise = self.get_noise(n_imgs, img_size, generator=generator).repeat(n_prompts, 1, 1, 1)
|
| 219 |
+
|
| 220 |
+
latents = noise * self.scheduler.init_noise_sigma
|
| 221 |
+
|
| 222 |
+
return latents
|
| 223 |
+
|
| 224 |
+
def get_noise(self, batch_size, img_size, generator=None):
|
| 225 |
+
|
| 226 |
+
param = list(self.parameters())[0]
|
| 227 |
+
|
| 228 |
+
return torch.randn(
|
| 229 |
+
(batch_size, self.unet.in_channels, img_size // 8, img_size // 8),
|
| 230 |
+
generator=generator).type(param.dtype).to(param.device)
|
| 231 |
+
|
| 232 |
+
def add_noise(self, latents, noise, step):
|
| 233 |
+
|
| 234 |
+
return self.scheduler.add_noise(latents, noise, torch.tensor([self.scheduler.timesteps[step]]))
|
| 235 |
+
|
| 236 |
+
def text_tokenize(self, prompts):
|
| 237 |
+
|
| 238 |
+
return self.tokenizer(prompts, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
| 239 |
+
|
| 240 |
+
def text_detokenize(self, tokens):
|
| 241 |
+
|
| 242 |
+
return [self.tokenizer.decode(token) for token in tokens if token != self.tokenizer.vocab_size - 1]
|
| 243 |
+
|
| 244 |
+
def text_encode(self, tokens):
|
| 245 |
+
|
| 246 |
+
return self.text_encoder(tokens.input_ids.to(self.unet.device))[0]
|
| 247 |
+
|
| 248 |
+
def decode(self, latents):
|
| 249 |
+
|
| 250 |
+
return self.vae.decode(1 / self.vae.config.scaling_factor * latents).sample
|
| 251 |
+
|
| 252 |
+
def encode(self, tensors):
|
| 253 |
+
|
| 254 |
+
return self.vae.encode(tensors).latent_dist.mode() * 0.18215
|
| 255 |
+
|
| 256 |
+
def to_image(self, image):
|
| 257 |
+
|
| 258 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 259 |
+
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
|
| 260 |
+
images = (image * 255).round().astype("uint8")
|
| 261 |
+
pil_images = [Image.fromarray(image) for image in images]
|
| 262 |
+
|
| 263 |
+
return pil_images
|
| 264 |
+
|
| 265 |
+
def set_scheduler_timesteps(self, n_steps):
    """Configure the scheduler for an *n_steps* inference schedule on the
    UNet's device."""
    self.scheduler.set_timesteps(n_steps, device=self.unet.device)
|
| 267 |
+
|
| 268 |
+
def get_initial_latents(self, n_imgs, img_size, n_prompts, generator=None):
    """Build starting latents: one shared noise draw per prompt, scaled by
    the scheduler's initial noise sigma."""
    base_noise = self.get_noise(n_imgs, img_size, generator=generator)
    tiled = base_noise.repeat(n_prompts, 1, 1, 1)
    return tiled * self.scheduler.init_noise_sigma
|
| 275 |
+
|
| 276 |
+
def get_text_embeddings(self, prompts, n_imgs):
    """Build the classifier-free-guidance embedding batch.

    Returns the unconditional ("" prompt) embeddings concatenated in
    front of the conditional ones, each repeated *n_imgs* times — the
    layout :meth:`predict_noise` expects when it chunks the batch in two.
    """
    text_tokens = self.text_tokenize(prompts)
    text_embeddings = self.text_encode(text_tokens)
    unconditional_tokens = self.text_tokenize([""] * len(prompts))
    unconditional_embeddings = self.text_encode(unconditional_tokens)
    text_embeddings = torch.cat([unconditional_embeddings, text_embeddings]).repeat_interleave(n_imgs, dim=0)
    return text_embeddings
|
| 289 |
+
|
| 290 |
+
def predict_noise(self,
                  iteration,
                  latents,
                  text_embeddings,
                  guidance_scale=7.5
                  ):
    """One UNet forward pass with classifier-free guidance.

    *text_embeddings* must hold the unconditional embeddings stacked
    before the conditional ones (as produced by get_text_embeddings);
    *iteration* indexes ``scheduler.timesteps``.
    """
    # Expand the latents so both halves of the guidance batch share one
    # forward pass instead of two.
    latents = torch.cat([latents] * 2)
    latents = self.scheduler.scale_model_input(
        latents, self.scheduler.timesteps[iteration])

    # Predict the noise residual.
    noise_prediction = self.unet(
        latents, self.scheduler.timesteps[iteration], encoder_hidden_states=text_embeddings).sample

    # Perform guidance: uncond + scale * (cond - uncond).
    noise_prediction_uncond, noise_prediction_text = noise_prediction.chunk(2)
    noise_prediction = noise_prediction_uncond + guidance_scale * \
        (noise_prediction_text - noise_prediction_uncond)

    return noise_prediction
|
| 312 |
+
# New method for inpainting
|
| 313 |
+
@torch.no_grad()
def inpaint(self, img_tensor, mask_tensor, prompts, img_size=512, n_steps=50, n_imgs=1, **kwargs):
    """Mask-constrained generation ("inpainting") entry point.

    NOTE(review): ``img_tensor`` is never used below — the initial
    latents come from pure noise rather than from encoding the input
    image; confirm this is intended.
    """
    assert 0 <= n_steps <= 1000

    self.set_scheduler_timesteps(n_steps)
    latents = self.get_initial_latents(n_imgs, img_size, len(prompts))

    # Prepare the mask: zero out the masked region of the initial noise.
    # NOTE(review): assumes mask_tensor is broadcastable to the 4-D latent
    # shape and expressed in latent-resolution coordinates — verify with
    # the callers.
    masked_latents = latents.clone()
    masked_latents[mask_tensor == 1] = 0  # set masked areas to 0 or noise

    text_embeddings = self.get_text_embeddings(prompts, n_imgs)

    latents_steps, trace_steps, noise_preds, output_steps = self.diffusion(
        masked_latents, text_embeddings, end_iteration=n_steps, **kwargs
    )

    # NOTE(review): latents are converted straight to images without VAE
    # decoding here, unlike __call__, which decodes first — confirm this
    # is intended.
    images_steps = [self.to_image(latents) for latents in latents_steps]
    return images_steps
|
| 332 |
+
|
| 333 |
+
@torch.no_grad()
def diffusion(self, latents, text_embeddings, end_iteration=1000, start_iteration=0, mask_tensor=None, **kwargs):
    """Masked denoising loop: x_t -> x_{t-1} over the given iteration range.

    Bug fix: the original body referenced ``mask_tensor``, which was never
    defined in this scope and raised a NameError on first use. It is now
    an explicit keyword argument (default ``None`` = denoise everywhere,
    keeping the call signature backward-compatible).

    NOTE(review): this definition is immediately shadowed by the later
    ``diffusion`` method in the same class, so it is effectively dead
    code at runtime.
    """
    latents_steps = []

    for iteration in tqdm(range(start_iteration, end_iteration)):
        noise_pred = self.predict_noise(iteration, latents, text_embeddings)
        stepped = self.scheduler.step(noise_pred, self.scheduler.timesteps[iteration], latents).prev_sample
        if mask_tensor is None:
            # No mask supplied: update the full latent tensor.
            latents = stepped
        else:
            # Only update latents inside the masked region.
            latents[mask_tensor == 1] = stepped[mask_tensor == 1]
        latents_steps.append(latents.clone())

    return latents_steps
|
| 344 |
+
@torch.no_grad()
def diffusion(self,
              latents,
              text_embeddings,
              end_iteration=1000,
              start_iteration=0,
              return_steps=True,  # this was False before
              pred_x0=False,
              trace_args=None,
              show_progress=True,
              eta=0.0,
              variance_scale = 0.0,
              **kwargs):
    """Run the reverse-diffusion loop and collect per-step artifacts.

    Returns a 4-tuple: (latents_steps, trace_steps, noise_preds,
    output_steps). With ``return_steps=True`` every step's latents are
    appended and moved to the CPU; otherwise only the final latents are
    kept on-device.

    NOTE(review): ``variance_scale`` is forwarded to ``scheduler.step``,
    which the stock diffusers DDIMScheduler does not accept — this
    requires a patched scheduler; verify before use. ``TraceDict`` is
    not imported in the visible source.
    """
    latents_steps = []
    trace_steps = []
    noise_preds = []
    output_steps = []
    trace = None

    for iteration in tqdm(range(start_iteration, end_iteration), disable=not show_progress):

        if trace_args:
            trace = TraceDict(self, **trace_args)

        noise_pred = self.predict_noise(
            iteration,
            latents,
            text_embeddings,
            **kwargs)
        # Compute the previous noisy sample x_t -> x_t-1; eta injects DDIM
        # stochasticity.
        noise_preds.append(noise_pred)
        output = self.scheduler.step(noise_pred, self.scheduler.timesteps[iteration], latents, eta=eta, variance_scale=variance_scale)
        output_steps.append(output)
        if trace_args:
            trace.close()
            trace_steps.append(trace)

        latents = output.prev_sample
        if return_steps or iteration == end_iteration - 1:
            # Either the denoised estimate x0 or the current latents.
            output = output.pred_original_sample if pred_x0 else latents
            if return_steps:
                latents_steps.append(output.cpu())
            else:
                latents_steps.append(output)

    return latents_steps, trace_steps, noise_preds, output_steps
|
| 402 |
+
|
| 403 |
+
@torch.no_grad()
def __call__(self,
             prompts,
             img_size=512,
             n_steps=50,
             n_imgs=1,
             end_iteration=None,
             generator=None,
             eta=0.0,
             variance_scale = 0.0,
             **kwargs):
    """Generate images for *prompts*.

    Returns a 6-tuple: (final_steps, np_images_steps, latents_steps,
    returned_latents, noise_preds, output_steps) — or, when tracing is
    active, (images_steps, trace_steps). ``final_steps`` is the per-image
    transpose of the per-step image lists.
    """
    assert 0 <= n_steps <= 1000

    if not isinstance(prompts, list):
        prompts = [prompts]

    self.set_scheduler_timesteps(n_steps)
    latents = self.get_initial_latents(n_imgs, img_size, len(prompts), generator=generator)
    text_embeddings = self.get_text_embeddings(prompts, n_imgs)
    end_iteration = end_iteration or n_steps

    latents_steps, trace_steps, noise_preds, output_steps = self.diffusion(latents, text_embeddings, end_iteration=end_iteration, eta=eta, variance_scale=variance_scale, **kwargs)
    # Keep the raw (undecoded) latents before VAE decoding overwrites the list.
    returned_latents = latents_steps
    latents_steps = [self.decode(latents.to(self.unet.device)) for latents in latents_steps]
    images_steps = [self.to_image(latents) for latents in latents_steps]

    np_latents = np.array([latents.cpu().numpy() for latents in latents_steps])

    print("latents_steps shape: ", np_latents.shape)

    np_images_steps = np.array(images_steps)

    # '*' unpacks images_steps: transpose steps-major to image-major.
    final_steps = list(zip(*images_steps))

    if trace_steps:
        return images_steps, trace_steps

    return final_steps, np_images_steps, latents_steps, returned_latents, noise_preds, output_steps
|
| 453 |
+
|
| 454 |
+
class FineTunedModel(torch.nn.Module):
    """Wraps a diffuser and deep-copies the UNet submodules selected by
    ``train_method`` so they can be fine-tuned while the base model stays
    frozen.

    Used as a context manager: inside ``with``, the fine-tuned copies are
    swapped into the wrapped model; on exit the originals are restored.
    """

    def __init__(self,
                 model,
                 train_method,
                 ):
        super().__init__()

        self.model = model
        self.ft_modules = {}    # module_name -> trainable deep copy
        self.orig_modules = {}  # module_name -> frozen original

        freeze(self.model)

        for module_name, module in model.named_modules():
            if 'unet' not in module_name:
                continue
            if module.__class__.__name__ in ["Linear", "Conv2d", "LoRACompatibleLinear", "LoRACompatibleConv"]:
                if train_method == 'xattn':
                    # Cross-attention layers only.
                    if 'attn2' not in module_name:
                        continue
                elif train_method == 'xattn-strict':
                    # BUG FIX: the original condition
                    #   'attn2' not in n or 'to_q' not in n or 'to_k' not in n
                    # skipped *every* module, because no module name can
                    # contain both 'to_q' and 'to_k'. Select cross-attention
                    # query/key projections instead.
                    if 'attn2' not in module_name or ('to_q' not in module_name and 'to_k' not in module_name):
                        continue
                elif train_method == 'noxattn':
                    # Everything except cross-attention.
                    if 'attn2' in module_name:
                        continue
                elif train_method == 'selfattn':
                    # Self-attention layers only.
                    if 'attn1' not in module_name:
                        continue
                else:
                    raise NotImplementedError(
                        f"train_method: {train_method} is not implemented."
                    )

                ft_module = copy.deepcopy(module)

                self.orig_modules[module_name] = module
                self.ft_modules[module_name] = ft_module

                unfreeze(ft_module)

        self.ft_modules_list = torch.nn.ModuleList(self.ft_modules.values())
        self.orig_modules_list = torch.nn.ModuleList(self.orig_modules.values())

    @classmethod
    def from_checkpoint(cls, model, checkpoint, train_method):
        """Build a FineTunedModel and load weights from a path or a state dict."""
        if isinstance(checkpoint, str):
            checkpoint = torch.load(checkpoint)

        ftm = FineTunedModel(model, train_method=train_method)
        ftm.load_state_dict(checkpoint)

        return ftm

    def __enter__(self):
        # Swap the trainable copies into the wrapped model.
        for key, ft_module in self.ft_modules.items():
            set_module(self.model, key, ft_module)

    def __exit__(self, exc_type, exc_value, tb):
        # Restore the frozen originals.
        for key, module in self.orig_modules.items():
            set_module(self.model, key, module)

    def parameters(self):
        """Return only the trainable (fine-tuned) parameters."""
        parameters = []
        for ft_module in self.ft_modules.values():
            parameters.extend(list(ft_module.parameters()))
        return parameters

    def state_dict(self):
        """Per-module state dicts keyed by the module's dotted name."""
        return {key: module.state_dict() for key, module in self.ft_modules.items()}

    def load_state_dict(self, state_dict):
        """Load per-module state dicts produced by :meth:`state_dict`."""
        for key, sd in state_dict.items():
            self.ft_modules[key].load_state_dict(sd)
|
| 546 |
+
def train(erase_concept, erase_from, train_method, iterations, negative_guidance, lr, save_path):
    """ESD training loop.

    Fine-tunes the UNet modules selected by *train_method* so that the
    model's noise prediction for each erased concept is pushed away from
    the concept direction (negative guidance), then saves the fine-tuned
    state dict to *save_path*.

    Bug fixes relative to the original:
      * ``diffusion()`` in this file returns four values; the original
        ``latents_steps, _ = ...`` raised a ValueError. Unpacking now
        uses ``latents_steps, *_``.
      * ``return_steps=False`` is passed explicitly so the sampled latents
        stay on the CUDA device (the default ``return_steps=True`` moves
        them to the CPU, which would crash the subsequent predict_noise
        calls on a CUDA model).
    """
    nsteps = 50  # sampling steps used to reach a partially-denoised latent

    diffuser = StableDiffuser(scheduler='DDIM').to('cuda')
    diffuser.train()

    finetuner = FineTunedModel(diffuser, train_method=train_method)

    optimizer = torch.optim.Adam(finetuner.parameters(), lr=lr)
    criteria = torch.nn.MSELoss()

    pbar = tqdm(range(iterations))
    erase_concept = [a.strip() for a in erase_concept.split(',')]
    erase_from = [a.strip() for a in erase_from.split(',')]

    if len(erase_from) != len(erase_concept):
        if len(erase_from) == 1:
            # Broadcast a single "erase from" concept across all targets.
            c = erase_from[0]
            erase_from = [c for _ in erase_concept]
        else:
            print(erase_from, erase_concept)
            raise Exception("Erase from concepts length need to match erase concepts length")

    # Pair each concept with the concept it is erased from.
    erase_concept = [[e, f] for e, f in zip(erase_concept, erase_from)]

    print(erase_concept)

    torch.cuda.empty_cache()

    for i in pbar:
        with torch.no_grad():
            # Sample one (concept, erase-from) pair per step.
            index = np.random.choice(len(erase_concept), 1, replace=False)[0]
            erase_concept_sampled = erase_concept[index]

            neutral_text_embeddings = diffuser.get_text_embeddings([''], n_imgs=1)
            positive_text_embeddings = diffuser.get_text_embeddings([erase_concept_sampled[0]], n_imgs=1)
            target_text_embeddings = diffuser.get_text_embeddings([erase_concept_sampled[1]], n_imgs=1)

            diffuser.set_scheduler_timesteps(nsteps)

            optimizer.zero_grad()

            iteration = torch.randint(1, nsteps - 1, (1,)).item()

            latents = diffuser.get_initial_latents(1, 512, 1)

            with finetuner:
                # Partially denoise with the fine-tuned model to get a
                # realistic intermediate latent.
                latents_steps, *_ = diffuser.diffusion(
                    latents,
                    positive_text_embeddings,
                    start_iteration=0,
                    end_iteration=iteration,
                    guidance_scale=3,
                    show_progress=False,
                    return_steps=False
                )

            # Map the step index onto the full 1000-step timestep schedule.
            diffuser.set_scheduler_timesteps(1000)
            iteration = int(iteration / nsteps * 1000)

            positive_latents = diffuser.predict_noise(iteration, latents_steps[0], positive_text_embeddings, guidance_scale=1)
            neutral_latents = diffuser.predict_noise(iteration, latents_steps[0], neutral_text_embeddings, guidance_scale=1)
            target_latents = diffuser.predict_noise(iteration, latents_steps[0], target_text_embeddings, guidance_scale=1)
            if erase_concept_sampled[0] == erase_concept_sampled[1]:
                # Erasing a concept "from itself": steer toward the neutral
                # (unconditional) prediction instead.
                target_latents = neutral_latents.clone().detach()

        with finetuner:
            negative_latents = diffuser.predict_noise(iteration, latents_steps[0], target_text_embeddings, guidance_scale=1)

        positive_latents.requires_grad = False
        neutral_latents.requires_grad = False

        # ESD objective: move the fine-tuned prediction toward
        # target - negative_guidance * (positive - neutral).
        loss = criteria(negative_latents, target_latents - (negative_guidance * (positive_latents - neutral_latents)))

        loss.backward()
        optimizer.step()

    torch.save(finetuner.state_dict(), save_path)

    del diffuser, loss, optimizer, finetuner, negative_latents, neutral_latents, positive_latents, latents_steps, latents

    torch.cuda.empty_cache()
|
| 648 |
+
|
| 649 |
+
if __name__ == '__main__':
    # Load a fine-tuned ESD ("erased concept") checkpoint into the base
    # diffuser and generate with the fine-tuned modules swapped in.
    model_path='ESD_Models/car_noxattn_200.pt'
    state_dict = torch.load(model_path)
    diffuser = StableDiffuser(scheduler='DDIM').to('cuda')
    finetuner = FineTunedModel(diffuser, train_method='noxattn')

    finetuner.load_state_dict(state_dict)

    # generation loop
    all_images = []
    with finetuner:
        # eta=1.0 injects full DDIM stochasticity (the "eta attack").
        images = diffuser('image of a car', n_steps=50, generator=torch.manual_seed(2440), eta=1.0)
    # NOTE(review): __call__ returns a 6-tuple whose first element is a
    # list of per-image tuples of PIL images across steps, so images[0][0]
    # is a tuple — plt.imshow likely needs images[0][0][-1]; verify.
    plt.imshow(images[0][0])
|
Attack_code/Noisy Diffusion(Eta attack)/gradient_asc_clip.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ESD Training Scripts/esd_diffusers.py
ADDED
|
@@ -0,0 +1,560 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
from matplotlib import pyplot as plt
|
| 3 |
+
import textwrap
|
| 4 |
+
import argparse
|
| 5 |
+
import torch
|
| 6 |
+
import copy
|
| 7 |
+
import os
|
| 8 |
+
import re
|
| 9 |
+
import numpy as np
|
| 10 |
+
from diffusers import AutoencoderKL, UNet2DConditionModel
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from tqdm.auto import tqdm
|
| 13 |
+
from transformers import CLIPTextModel, CLIPTokenizer, CLIPFeatureExtractor
|
| 14 |
+
from diffusers.schedulers import EulerAncestralDiscreteScheduler
|
| 15 |
+
from diffusers.schedulers.scheduling_ddim import DDIMScheduler
|
| 16 |
+
from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
|
| 17 |
+
from diffusers.schedulers.scheduling_lms_discrete import LMSDiscreteScheduler
|
| 18 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
| 19 |
+
|
| 20 |
+
def to_gif(images, path):
    """Save a list of PIL images as an animated, looping GIF at *path*."""
    first_frame, remaining_frames = images[0], images[1:]
    first_frame.save(path, save_all=True,
                     append_images=remaining_frames, loop=0, duration=len(images) * 20)
|
| 24 |
+
|
| 25 |
+
def figure_to_image(figure):
    """Rasterize a matplotlib figure into a PIL RGB image.

    NOTE(review): relies on ``canvas.tostring_rgb``, which newer
    matplotlib releases removed in favor of ``buffer_rgba`` — verify
    against the pinned matplotlib version.
    """
    figure.set_dpi(300)
    figure.canvas.draw()
    return Image.frombytes('RGB', figure.canvas.get_width_height(), figure.canvas.tostring_rgb())
|
| 32 |
+
|
| 33 |
+
def image_grid(images, outpath=None, column_titles=None, row_titles=None):
    """Lay out a 2-D list of images (rows of columns) as a matplotlib grid.

    If *outpath* is given the grid is saved there and nothing is
    returned; otherwise the rendered grid is returned as a PIL image.
    Optional *column_titles* / *row_titles* label the first row/column.
    """
    n_rows = len(images)
    n_cols = len(images[0])

    # squeeze=False keeps axs 2-D even for a single row or column.
    fig, axs = plt.subplots(nrows=n_rows, ncols=n_cols,
                            figsize=(n_cols, n_rows), squeeze=False)

    for row, _images in enumerate(images):
        for column, image in enumerate(_images):
            ax = axs[row][column]
            ax.imshow(image)
            if column_titles and row == 0:
                ax.set_title(textwrap.fill(
                    column_titles[column], width=12), fontsize='x-small')
            if row_titles and column == 0:
                ax.set_ylabel(row_titles[row], rotation=0, fontsize='x-small', labelpad=1.6 * len(row_titles[row]))
            ax.set_xticks([])
            ax.set_yticks([])

    plt.subplots_adjust(wspace=0, hspace=0)

    if outpath is not None:
        plt.savefig(outpath, bbox_inches='tight', dpi=300)
        plt.close()
    else:
        plt.tight_layout(pad=0)
        image = figure_to_image(plt.gcf())
        plt.close()
        return image
|
| 64 |
+
|
| 65 |
+
def get_module(module, module_name):
    """Resolve a dotted attribute path (or a list of path parts) on *module*."""
    parts = module_name.split('.') if isinstance(module_name, str) else module_name
    current = module
    for part in parts:
        current = getattr(current, part)
    return current
|
| 75 |
+
|
| 76 |
+
def set_module(module, module_name, new_module):
    """Assign *new_module* at a dotted attribute path (or list of parts)
    under *module*."""
    parts = module_name.split('.') if isinstance(module_name, str) else module_name
    *parents, leaf = parts
    target = module
    for part in parents:
        target = getattr(target, part)
    return setattr(target, leaf, new_module)
|
| 86 |
+
|
| 87 |
+
def freeze(module):
    """Disable gradient tracking for every parameter of *module*."""
    for param in module.parameters():
        param.requires_grad = False
|
| 92 |
+
|
| 93 |
+
def unfreeze(module):
    """Enable gradient tracking for every parameter of *module*."""
    for param in module.parameters():
        param.requires_grad = True
|
| 98 |
+
|
| 99 |
+
def get_concat_h(im1, im2):
    """Paste *im1* and *im2* side by side onto a new RGB canvas."""
    total_width = im1.width + im2.width
    canvas = Image.new('RGB', (total_width, im1.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (im1.width, 0))
    return canvas
|
| 104 |
+
|
| 105 |
+
def get_concat_v(im1, im2):
    """Stack *im1* above *im2* on a new RGB canvas."""
    total_height = im1.height + im2.height
    canvas = Image.new('RGB', (im1.width, total_height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (0, im1.height))
    return canvas
|
| 110 |
+
|
| 111 |
+
class StableDiffuser(torch.nn.Module):
    """Minimal Stable Diffusion v1-4 wrapper: VAE + CLIP text encoder +
    UNet + a configurable scheduler, with a simple generation loop."""

    def __init__(self,
                 scheduler='LMS'
                 ):
        super().__init__()

        # Load the autoencoder model which will be used to decode the
        # latents into image space.
        self.vae = AutoencoderKL.from_pretrained(
            "CompVis/stable-diffusion-v1-4", subfolder="vae")

        # Load the tokenizer and text encoder to tokenize and encode the text.
        self.tokenizer = CLIPTokenizer.from_pretrained(
            "openai/clip-vit-large-patch14")
        self.text_encoder = CLIPTextModel.from_pretrained(
            "openai/clip-vit-large-patch14")

        # The UNet model for generating the latents.
        self.unet = UNet2DConditionModel.from_pretrained(
            "CompVis/stable-diffusion-v1-4", subfolder="unet")

        self.feature_extractor = CLIPFeatureExtractor.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="feature_extractor")
        self.safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="safety_checker")

        # Scheduler selection; unrecognized names leave self.scheduler unset.
        if scheduler == 'LMS':
            self.scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
        elif scheduler == 'DDIM':
            self.scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
        elif scheduler == 'DDPM':
            self.scheduler = DDPMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")

        self.eval()

    def get_noise(self, batch_size, img_size, generator=None):
        """Gaussian latent noise of shape (batch, C, H/8, W/8), matching
        the dtype/device of the model's first parameter."""
        param = list(self.parameters())[0]
        return torch.randn(
            (batch_size, self.unet.in_channels, img_size // 8, img_size // 8),
            generator=generator).type(param.dtype).to(param.device)

    def add_noise(self, latents, noise, step):
        """Forward-diffuse *latents* at the scheduler timestep indexed by *step*."""
        return self.scheduler.add_noise(latents, noise, torch.tensor([self.scheduler.timesteps[step]]))

    def text_tokenize(self, prompts):
        """Tokenize prompts to the tokenizer's fixed max length as tensors."""
        return self.tokenizer(prompts, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt")

    def text_detokenize(self, tokens):
        """Decode token ids, skipping id ``vocab_size - 1``.
        NOTE(review): assumes that id is the padding/EOS token — verify."""
        return [self.tokenizer.decode(token) for token in tokens if token != self.tokenizer.vocab_size - 1]

    def text_encode(self, tokens):
        """CLIP text encoder forward pass; returns last hidden states."""
        return self.text_encoder(tokens.input_ids.to(self.unet.device))[0]

    def decode(self, latents):
        """Decode VAE latents (undoing the config scaling factor) to pixels."""
        return self.vae.decode(1 / self.vae.config.scaling_factor * latents).sample

    def encode(self, tensors):
        """Encode pixels to scaled VAE latents (deterministic mode).
        NOTE(review): hard-codes 0.18215 — the SD v1 value of
        ``vae.config.scaling_factor`` used by decode()."""
        return self.vae.encode(tensors).latent_dist.mode() * 0.18215

    def to_image(self, image):
        """Convert decoded tensors in [-1, 1] to a list of PIL images."""
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
        images = (image * 255).round().astype("uint8")
        pil_images = [Image.fromarray(image) for image in images]
        return pil_images

    def set_scheduler_timesteps(self, n_steps):
        """Configure an n_steps inference schedule on the UNet's device."""
        self.scheduler.set_timesteps(n_steps, device=self.unet.device)

    def get_initial_latents(self, n_imgs, img_size, n_prompts, generator=None):
        """Starting latents: one shared noise draw per prompt, scaled by
        the scheduler's initial noise sigma."""
        noise = self.get_noise(n_imgs, img_size, generator=generator).repeat(n_prompts, 1, 1, 1)
        latents = noise * self.scheduler.init_noise_sigma
        return latents

    def get_text_embeddings(self, prompts, n_imgs):
        """Classifier-free-guidance batch: unconditional embeddings stacked
        before conditional ones, each repeated n_imgs times."""
        text_tokens = self.text_tokenize(prompts)
        text_embeddings = self.text_encode(text_tokens)
        unconditional_tokens = self.text_tokenize([""] * len(prompts))
        unconditional_embeddings = self.text_encode(unconditional_tokens)
        text_embeddings = torch.cat([unconditional_embeddings, text_embeddings]).repeat_interleave(n_imgs, dim=0)
        return text_embeddings

    def predict_noise(self,
                      iteration,
                      latents,
                      text_embeddings,
                      guidance_scale=7.5
                      ):
        """One UNet pass with classifier-free guidance at timestep index
        *iteration*."""
        # Expand the latents so both guidance halves share one forward pass.
        latents = torch.cat([latents] * 2)
        latents = self.scheduler.scale_model_input(
            latents, self.scheduler.timesteps[iteration])

        # Predict the noise residual.
        noise_prediction = self.unet(
            latents, self.scheduler.timesteps[iteration], encoder_hidden_states=text_embeddings).sample

        # Perform guidance: uncond + scale * (cond - uncond).
        noise_prediction_uncond, noise_prediction_text = noise_prediction.chunk(2)
        noise_prediction = noise_prediction_uncond + guidance_scale * \
            (noise_prediction_text - noise_prediction_uncond)

        return noise_prediction

    @torch.no_grad()
    def diffusion(self,
                  latents,
                  text_embeddings,
                  end_iteration=1000,
                  start_iteration=0,
                  return_steps=False,
                  pred_x0=False,
                  trace_args=None,
                  show_progress=True,
                  **kwargs):
        """Reverse-diffusion loop; returns (latents_steps, trace_steps).

        With return_steps=True every step's latents are kept (on CPU);
        otherwise only the final latents, on-device.
        NOTE(review): ``TraceDict`` is not imported in the visible source.
        """
        latents_steps = []
        trace_steps = []

        trace = None

        for iteration in tqdm(range(start_iteration, end_iteration), disable=not show_progress):

            if trace_args:
                trace = TraceDict(self, **trace_args)

            noise_pred = self.predict_noise(
                iteration,
                latents,
                text_embeddings,
                **kwargs)

            # compute the previous noisy sample x_t -> x_t-1
            output = self.scheduler.step(noise_pred, self.scheduler.timesteps[iteration], latents)

            if trace_args:
                trace.close()
                trace_steps.append(trace)

            latents = output.prev_sample

            if return_steps or iteration == end_iteration - 1:
                # Either the denoised x0 estimate or the current latents.
                output = output.pred_original_sample if pred_x0 else latents
                if return_steps:
                    latents_steps.append(output.cpu())
                else:
                    latents_steps.append(output)

        return latents_steps, trace_steps

    @torch.no_grad()
    def __call__(self,
                 prompts,
                 img_size=512,
                 n_steps=50,
                 n_imgs=1,
                 end_iteration=None,
                 generator=None,
                 **kwargs
                 ):
        """Generate images; returns a per-image list of per-step PIL image
        tuples (plus trace_steps when tracing is active)."""
        assert 0 <= n_steps <= 1000

        if not isinstance(prompts, list):
            prompts = [prompts]

        self.set_scheduler_timesteps(n_steps)

        latents = self.get_initial_latents(n_imgs, img_size, len(prompts), generator=generator)

        text_embeddings = self.get_text_embeddings(prompts,n_imgs=n_imgs)

        end_iteration = end_iteration or n_steps

        latents_steps, trace_steps = self.diffusion(
            latents,
            text_embeddings,
            end_iteration=end_iteration,
            **kwargs
        )

        latents_steps = [self.decode(latents.to(self.unet.device)) for latents in latents_steps]
        images_steps = [self.to_image(latents) for latents in latents_steps]

        # Transpose steps-major to image-major.
        images_steps = list(zip(*images_steps))

        if trace_steps:
            return images_steps, trace_steps

        return images_steps
|
| 336 |
+
|
| 337 |
+
class FineTunedModel(torch.nn.Module):
    """Holds trainable deep copies of selected UNet submodules.

    The original model is frozen; for each selected Linear/Conv layer a
    deep-copied, unfrozen twin is kept.  Entering the object as a context
    manager swaps the twins into the model (so forward passes use the
    fine-tuned weights); exiting restores the originals.
    """

    def __init__(self,
                 model,
                 train_method,
                 ):
        """Select and clone the UNet layers to fine-tune.

        Args:
            model: wrapper whose named modules include a ``unet`` subtree
                   (e.g. a StableDiffuser).
            train_method: one of 'xattn', 'xattn-strict', 'noxattn',
                          'selfattn', 'full'.

        Raises:
            NotImplementedError: for an unknown train_method.
        """
        super().__init__()

        self.model = model
        self.ft_modules = {}
        self.orig_modules = {}

        freeze(self.model)

        for module_name, module in model.named_modules():
            if 'unet' not in module_name:
                continue
            if module.__class__.__name__ in ["Linear", "Conv2d", "LoRACompatibleLinear", "LoRACompatibleConv"]:
                if train_method == 'xattn':
                    # Only cross-attention blocks.
                    if 'attn2' not in module_name:
                        continue
                elif train_method == 'xattn-strict':
                    # Only the query/key projections of cross-attention.
                    # BUGFIX: the previous condition
                    #   'attn2' not in name or 'to_q' not in name or 'to_k' not in name
                    # was always true (no single module contains both to_q and
                    # to_k), so 'xattn-strict' selected zero modules.
                    if 'attn2' not in module_name or not ('to_q' in module_name or 'to_k' in module_name):
                        continue
                elif train_method == 'noxattn':
                    # Everything except cross-attention.
                    if 'attn2' in module_name:
                        continue
                elif train_method == 'selfattn':
                    # Only self-attention blocks.
                    if 'attn1' not in module_name:
                        continue
                elif train_method == 'full':
                    # Fine-tune every eligible UNet layer (advertised by the
                    # CLI help but previously unimplemented).
                    pass
                else:
                    raise NotImplementedError(
                        f"train_method: {train_method} is not implemented."
                    )
                print(module_name)
                ft_module = copy.deepcopy(module)

                self.orig_modules[module_name] = module
                self.ft_modules[module_name] = ft_module

                unfreeze(ft_module)

        # Register both sets so .to()/device moves track them.
        self.ft_modules_list = torch.nn.ModuleList(self.ft_modules.values())
        self.orig_modules_list = torch.nn.ModuleList(self.orig_modules.values())

    @classmethod
    def from_checkpoint(cls, model, checkpoint, train_method):
        """Rebuild a FineTunedModel and load weights from a checkpoint.

        Args:
            checkpoint: a path to a saved state dict, or the dict itself.
        """
        if isinstance(checkpoint, str):
            checkpoint = torch.load(checkpoint)

        ftm = cls(model, train_method=train_method)
        ftm.load_state_dict(checkpoint)

        return ftm

    def __enter__(self):
        # Swap the fine-tuned twins into the wrapped model.
        for key, ft_module in self.ft_modules.items():
            set_module(self.model, key, ft_module)

    def __exit__(self, exc_type, exc_value, tb):
        # Restore the frozen originals.
        for key, module in self.orig_modules.items():
            set_module(self.model, key, module)

    def parameters(self):
        """Return only the trainable (fine-tuned) parameters."""
        parameters = []
        for ft_module in self.ft_modules.values():
            parameters.extend(list(ft_module.parameters()))
        return parameters

    def state_dict(self):
        """Nested dict: module name -> that module's state dict."""
        return {key: module.state_dict() for key, module in self.ft_modules.items()}

    def load_state_dict(self, state_dict):
        """Load per-module state dicts produced by :meth:`state_dict`."""
        for key, sd in state_dict.items():
            self.ft_modules[key].load_state_dict(sd)
|
| 429 |
+
def train(erase_concept, erase_from, train_method, iterations, negative_guidance, lr, save_path):
    """Fine-tune Stable Diffusion with the ESD objective to erase concepts.

    Args:
        erase_concept: comma-separated concepts to erase.
        erase_from: comma-separated anchor concepts (broadcast if single).
        train_method: which UNet layers to train (see FineTunedModel).
        iterations: number of optimization steps.
        negative_guidance: strength of the negative-guidance term in the loss.
        lr: Adam learning rate.
        save_path: where the fine-tuned state dict is written.
    """
    # Number of DDIM steps used to sample the partial denoising trajectory.
    nsteps = 50

    diffuser = StableDiffuser(scheduler='DDIM').to('cuda')
    diffuser.train()

    finetuner = FineTunedModel(diffuser, train_method=train_method)

    optimizer = torch.optim.Adam(finetuner.parameters(), lr=lr)
    criteria = torch.nn.MSELoss()

    pbar = tqdm(range(iterations))

    # Parse comma-separated concept strings into stripped lists.
    erase_concept = erase_concept.split(',')
    erase_concept = [a.strip() for a in erase_concept]

    erase_from = erase_from.split(',')
    erase_from = [a.strip() for a in erase_from]

    # Broadcast a single anchor concept across all erased concepts;
    # otherwise the two lists must be the same length.
    if len(erase_from)!=len(erase_concept):
        if len(erase_from) == 1:
            c = erase_from[0]
            erase_from = [c for _ in erase_concept]
        else:
            print(erase_from, erase_concept)
            raise Exception("Erase from concepts length need to match erase concepts length")

    # Pair each erased concept with its anchor: [[erase, from], ...].
    erase_concept_ = []
    for e, f in zip(erase_concept, erase_from):
        erase_concept_.append([e,f])

    erase_concept = erase_concept_

    print(erase_concept)

    torch.cuda.empty_cache()

    for i in pbar:
        with torch.no_grad():
            # Pick one (erase, from) pair uniformly at random this iteration.
            index = np.random.choice(len(erase_concept), 1, replace=False)[0]
            erase_concept_sampled = erase_concept[index]

            neutral_text_embeddings = diffuser.get_text_embeddings([''],n_imgs=1)
            positive_text_embeddings = diffuser.get_text_embeddings([erase_concept_sampled[0]],n_imgs=1)
            target_text_embeddings = diffuser.get_text_embeddings([erase_concept_sampled[1]],n_imgs=1)

            diffuser.set_scheduler_timesteps(nsteps)

            optimizer.zero_grad()

            # Random intermediate step (exclusive of the endpoints).
            iteration = torch.randint(1, nsteps - 1, (1,)).item()

            latents = diffuser.get_initial_latents(1, 512, 1)

            with finetuner:
                # Partially denoise up to the sampled step using the
                # fine-tuned weights (no gradients needed here).
                latents_steps, _ = diffuser.diffusion(
                    latents,
                    positive_text_embeddings,
                    start_iteration=0,
                    end_iteration=iteration,
                    guidance_scale=3,
                    show_progress=False
                )

            # Map the sampled 50-step index onto the full 1000-step schedule.
            diffuser.set_scheduler_timesteps(1000)

            iteration = int(iteration / nsteps * 1000)

            # Frozen-model noise predictions for the three conditionings.
            positive_latents = diffuser.predict_noise(iteration, latents_steps[0], positive_text_embeddings, guidance_scale=1)
            neutral_latents = diffuser.predict_noise(iteration, latents_steps[0], neutral_text_embeddings, guidance_scale=1)
            target_latents = diffuser.predict_noise(iteration, latents_steps[0], target_text_embeddings, guidance_scale=1)
            # When erasing a concept "from itself", steer toward the
            # unconditional (neutral) prediction instead.
            if erase_concept_sampled[0] == erase_concept_sampled[1]:
                target_latents = neutral_latents.clone().detach()
        # Prediction under the fine-tuned weights, WITH gradients — this is
        # the only tensor loss.backward() differentiates through.
        with finetuner:
            negative_latents = diffuser.predict_noise(iteration, latents_steps[0], target_text_embeddings, guidance_scale=1)

        positive_latents.requires_grad = False
        neutral_latents.requires_grad = False

        # ESD loss: push the fine-tuned prediction toward the target
        # prediction minus a negatively-guided direction of the concept.
        loss = criteria(negative_latents, target_latents - (negative_guidance*(positive_latents - neutral_latents))) #loss = criteria(e_n, e_0) works the best try 5000 epochs

        loss.backward()
        optimizer.step()

    torch.save(finetuner.state_dict(), save_path)

    del diffuser, loss, optimizer, finetuner, negative_latents, neutral_latents, positive_latents, latents_steps, latents

    torch.cuda.empty_cache()
|
| 531 |
+
|
| 532 |
+
if __name__ == '__main__':
    # CLI entry point for ESD fine-tuning.
    parser = argparse.ArgumentParser(
                    prog = 'TrainESD',
                    description = 'Finetuning stable diffusion to erase the concepts')
    parser.add_argument('--erase_concept', help='concept to erase', type=str, required=True)
    parser.add_argument('--erase_from', help='target concept to erase from', type=str, required=False, default = None)
    parser.add_argument('--train_method', help='Type of method (xattn, noxattn, full, xattn-strict', type=str, required=True)
    parser.add_argument('--iterations', help='Number of iterations', type=int, default=200)
    parser.add_argument('--lr', help='Learning rate', type=float, default=2e-5)
    parser.add_argument('--negative_guidance', help='Negative guidance value', type=float, required=False, default=1)
    parser.add_argument('--save_path', help='Path to save model', type=str, default='models/')
    parser.add_argument('--device', help='cuda device to train on', type=str, required=False, default='cuda:0')

    args = parser.parse_args()

    erase_concept = args.erase_concept
    erase_from = args.erase_from
    # Default the anchor concept to the erased concept itself.
    if erase_from is None:
        erase_from = erase_concept
    train_method = args.train_method
    iterations = args.iterations
    negative_guidance = args.negative_guidance
    lr = args.lr
    # NOTE(review): --device is accepted but never used; train() hard-codes
    # 'cuda'. Kept for CLI backward compatibility — wire it through train()
    # if multi-GPU selection is needed.

    # Checkpoint name encodes concepts, method, guidance and epoch count.
    name = f"esd-{erase_concept.lower().replace(' ','').replace(',','')}_from_{erase_from.lower().replace(' ','').replace(',','')}-{train_method}_{negative_guidance}-epochs_{iterations}"
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path, exist_ok = True)
    save_path = f'{args.save_path}/{name}.pt'
    train(erase_concept=erase_concept, erase_from=erase_from, train_method=train_method, iterations=iterations, negative_guidance=negative_guidance, lr=lr, save_path=save_path)
|
Pipfile
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[[source]]
|
| 2 |
+
url = "https://pypi.org/simple"
|
| 3 |
+
verify_ssl = true
|
| 4 |
+
name = "pypi"
|
| 5 |
+
|
| 6 |
+
[packages]
|
| 7 |
+
pillow = "*"
|
| 8 |
+
requests = "*"
|
| 9 |
+
pandas = "*"
|
| 10 |
+
numpy = "*"
|
| 11 |
+
transformers = "*"
|
| 12 |
+
torch = "*"
|
| 13 |
+
diffusers = "*"
|
| 14 |
+
|
| 15 |
+
[dev-packages]
|
| 16 |
+
|
| 17 |
+
[requires]
|
| 18 |
+
python_version = "3.12"
|
| 19 |
+
python_full_version = "3.12.6"
|
Pipfile.lock
ADDED
|
@@ -0,0 +1,981 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_meta": {
|
| 3 |
+
"hash": {
|
| 4 |
+
"sha256": "c187f0d79da53236f1ef7cf6e37c48bd750594b5870a901518759b09f45830f0"
|
| 5 |
+
},
|
| 6 |
+
"pipfile-spec": 6,
|
| 7 |
+
"requires": {
|
| 8 |
+
"python_full_version": "3.12.6",
|
| 9 |
+
"python_version": "3.12"
|
| 10 |
+
},
|
| 11 |
+
"sources": [
|
| 12 |
+
{
|
| 13 |
+
"name": "pypi",
|
| 14 |
+
"url": "https://pypi.org/simple",
|
| 15 |
+
"verify_ssl": true
|
| 16 |
+
}
|
| 17 |
+
]
|
| 18 |
+
},
|
| 19 |
+
"default": {
|
| 20 |
+
"certifi": {
|
| 21 |
+
"hashes": [
|
| 22 |
+
"sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8",
|
| 23 |
+
"sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"
|
| 24 |
+
],
|
| 25 |
+
"markers": "python_version >= '3.6'",
|
| 26 |
+
"version": "==2024.8.30"
|
| 27 |
+
},
|
| 28 |
+
"charset-normalizer": {
|
| 29 |
+
"hashes": [
|
| 30 |
+
"sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621",
|
| 31 |
+
"sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6",
|
| 32 |
+
"sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8",
|
| 33 |
+
"sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912",
|
| 34 |
+
"sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c",
|
| 35 |
+
"sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b",
|
| 36 |
+
"sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d",
|
| 37 |
+
"sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d",
|
| 38 |
+
"sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95",
|
| 39 |
+
"sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e",
|
| 40 |
+
"sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565",
|
| 41 |
+
"sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64",
|
| 42 |
+
"sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab",
|
| 43 |
+
"sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be",
|
| 44 |
+
"sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e",
|
| 45 |
+
"sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907",
|
| 46 |
+
"sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0",
|
| 47 |
+
"sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2",
|
| 48 |
+
"sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62",
|
| 49 |
+
"sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62",
|
| 50 |
+
"sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23",
|
| 51 |
+
"sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc",
|
| 52 |
+
"sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284",
|
| 53 |
+
"sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca",
|
| 54 |
+
"sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455",
|
| 55 |
+
"sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858",
|
| 56 |
+
"sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b",
|
| 57 |
+
"sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594",
|
| 58 |
+
"sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc",
|
| 59 |
+
"sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db",
|
| 60 |
+
"sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b",
|
| 61 |
+
"sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea",
|
| 62 |
+
"sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6",
|
| 63 |
+
"sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920",
|
| 64 |
+
"sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749",
|
| 65 |
+
"sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7",
|
| 66 |
+
"sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd",
|
| 67 |
+
"sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99",
|
| 68 |
+
"sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242",
|
| 69 |
+
"sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee",
|
| 70 |
+
"sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129",
|
| 71 |
+
"sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2",
|
| 72 |
+
"sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51",
|
| 73 |
+
"sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee",
|
| 74 |
+
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8",
|
| 75 |
+
"sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b",
|
| 76 |
+
"sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613",
|
| 77 |
+
"sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742",
|
| 78 |
+
"sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe",
|
| 79 |
+
"sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3",
|
| 80 |
+
"sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5",
|
| 81 |
+
"sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631",
|
| 82 |
+
"sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7",
|
| 83 |
+
"sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15",
|
| 84 |
+
"sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c",
|
| 85 |
+
"sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea",
|
| 86 |
+
"sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417",
|
| 87 |
+
"sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250",
|
| 88 |
+
"sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88",
|
| 89 |
+
"sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca",
|
| 90 |
+
"sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa",
|
| 91 |
+
"sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99",
|
| 92 |
+
"sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149",
|
| 93 |
+
"sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41",
|
| 94 |
+
"sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574",
|
| 95 |
+
"sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0",
|
| 96 |
+
"sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f",
|
| 97 |
+
"sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d",
|
| 98 |
+
"sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654",
|
| 99 |
+
"sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3",
|
| 100 |
+
"sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19",
|
| 101 |
+
"sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90",
|
| 102 |
+
"sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578",
|
| 103 |
+
"sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9",
|
| 104 |
+
"sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1",
|
| 105 |
+
"sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51",
|
| 106 |
+
"sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719",
|
| 107 |
+
"sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236",
|
| 108 |
+
"sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a",
|
| 109 |
+
"sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c",
|
| 110 |
+
"sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade",
|
| 111 |
+
"sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944",
|
| 112 |
+
"sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc",
|
| 113 |
+
"sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6",
|
| 114 |
+
"sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6",
|
| 115 |
+
"sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27",
|
| 116 |
+
"sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6",
|
| 117 |
+
"sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2",
|
| 118 |
+
"sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12",
|
| 119 |
+
"sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf",
|
| 120 |
+
"sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114",
|
| 121 |
+
"sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7",
|
| 122 |
+
"sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf",
|
| 123 |
+
"sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d",
|
| 124 |
+
"sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b",
|
| 125 |
+
"sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed",
|
| 126 |
+
"sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03",
|
| 127 |
+
"sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4",
|
| 128 |
+
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67",
|
| 129 |
+
"sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365",
|
| 130 |
+
"sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a",
|
| 131 |
+
"sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748",
|
| 132 |
+
"sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b",
|
| 133 |
+
"sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079",
|
| 134 |
+
"sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"
|
| 135 |
+
],
|
| 136 |
+
"markers": "python_full_version >= '3.7.0'",
|
| 137 |
+
"version": "==3.4.0"
|
| 138 |
+
},
|
| 139 |
+
"diffusers": {
|
| 140 |
+
"hashes": [
|
| 141 |
+
"sha256:b1d01a73e45d43a0630c299173915dddd69fc50f2ae8f2ab5de4fd245eaed72f",
|
| 142 |
+
"sha256:cbc498ae63f4abfc7c3a07649cdcbee229ef2f9a9a1f0d19c9bbaf22f8d30c1f"
|
| 143 |
+
],
|
| 144 |
+
"index": "pypi",
|
| 145 |
+
"markers": "python_full_version >= '3.8.0'",
|
| 146 |
+
"version": "==0.31.0"
|
| 147 |
+
},
|
| 148 |
+
"filelock": {
|
| 149 |
+
"hashes": [
|
| 150 |
+
"sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0",
|
| 151 |
+
"sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"
|
| 152 |
+
],
|
| 153 |
+
"markers": "python_version >= '3.8'",
|
| 154 |
+
"version": "==3.16.1"
|
| 155 |
+
},
|
| 156 |
+
"fsspec": {
|
| 157 |
+
"hashes": [
|
| 158 |
+
"sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871",
|
| 159 |
+
"sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"
|
| 160 |
+
],
|
| 161 |
+
"markers": "python_version >= '3.8'",
|
| 162 |
+
"version": "==2024.10.0"
|
| 163 |
+
},
|
| 164 |
+
"huggingface-hub": {
|
| 165 |
+
"hashes": [
|
| 166 |
+
"sha256:414c0d9b769eecc86c70f9d939d0f48bb28e8461dd1130021542eff0212db890",
|
| 167 |
+
"sha256:5927a8fc64ae68859cd954b7cc29d1c8390a5e15caba6d3d349c973be8fdacf3"
|
| 168 |
+
],
|
| 169 |
+
"markers": "python_full_version >= '3.8.0'",
|
| 170 |
+
"version": "==0.26.1"
|
| 171 |
+
},
|
| 172 |
+
"idna": {
|
| 173 |
+
"hashes": [
|
| 174 |
+
"sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9",
|
| 175 |
+
"sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"
|
| 176 |
+
],
|
| 177 |
+
"markers": "python_version >= '3.6'",
|
| 178 |
+
"version": "==3.10"
|
| 179 |
+
},
|
| 180 |
+
"importlib-metadata": {
|
| 181 |
+
"hashes": [
|
| 182 |
+
"sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b",
|
| 183 |
+
"sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"
|
| 184 |
+
],
|
| 185 |
+
"markers": "python_version >= '3.8'",
|
| 186 |
+
"version": "==8.5.0"
|
| 187 |
+
},
|
| 188 |
+
"jinja2": {
|
| 189 |
+
"hashes": [
|
| 190 |
+
"sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369",
|
| 191 |
+
"sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"
|
| 192 |
+
],
|
| 193 |
+
"markers": "python_version >= '3.7'",
|
| 194 |
+
"version": "==3.1.4"
|
| 195 |
+
},
|
| 196 |
+
"markupsafe": {
|
| 197 |
+
"hashes": [
|
| 198 |
+
"sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4",
|
| 199 |
+
"sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30",
|
| 200 |
+
"sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0",
|
| 201 |
+
"sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9",
|
| 202 |
+
"sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396",
|
| 203 |
+
"sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13",
|
| 204 |
+
"sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028",
|
| 205 |
+
"sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca",
|
| 206 |
+
"sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557",
|
| 207 |
+
"sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832",
|
| 208 |
+
"sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0",
|
| 209 |
+
"sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b",
|
| 210 |
+
"sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579",
|
| 211 |
+
"sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a",
|
| 212 |
+
"sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c",
|
| 213 |
+
"sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff",
|
| 214 |
+
"sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c",
|
| 215 |
+
"sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22",
|
| 216 |
+
"sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094",
|
| 217 |
+
"sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb",
|
| 218 |
+
"sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e",
|
| 219 |
+
"sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5",
|
| 220 |
+
"sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a",
|
| 221 |
+
"sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d",
|
| 222 |
+
"sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a",
|
| 223 |
+
"sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b",
|
| 224 |
+
"sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8",
|
| 225 |
+
"sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225",
|
| 226 |
+
"sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c",
|
| 227 |
+
"sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144",
|
| 228 |
+
"sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f",
|
| 229 |
+
"sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87",
|
| 230 |
+
"sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d",
|
| 231 |
+
"sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93",
|
| 232 |
+
"sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf",
|
| 233 |
+
"sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158",
|
| 234 |
+
"sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84",
|
| 235 |
+
"sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb",
|
| 236 |
+
"sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48",
|
| 237 |
+
"sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171",
|
| 238 |
+
"sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c",
|
| 239 |
+
"sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6",
|
| 240 |
+
"sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd",
|
| 241 |
+
"sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d",
|
| 242 |
+
"sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1",
|
| 243 |
+
"sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d",
|
| 244 |
+
"sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca",
|
| 245 |
+
"sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a",
|
| 246 |
+
"sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29",
|
| 247 |
+
"sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe",
|
| 248 |
+
"sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798",
|
| 249 |
+
"sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c",
|
| 250 |
+
"sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8",
|
| 251 |
+
"sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f",
|
| 252 |
+
"sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f",
|
| 253 |
+
"sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a",
|
| 254 |
+
"sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178",
|
| 255 |
+
"sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0",
|
| 256 |
+
"sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79",
|
| 257 |
+
"sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430",
|
| 258 |
+
"sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"
|
| 259 |
+
],
|
| 260 |
+
"markers": "python_version >= '3.9'",
|
| 261 |
+
"version": "==3.0.2"
|
| 262 |
+
},
|
| 263 |
+
"mpmath": {
|
| 264 |
+
"hashes": [
|
| 265 |
+
"sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f",
|
| 266 |
+
"sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"
|
| 267 |
+
],
|
| 268 |
+
"version": "==1.3.0"
|
| 269 |
+
},
|
| 270 |
+
"networkx": {
|
| 271 |
+
"hashes": [
|
| 272 |
+
"sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1",
|
| 273 |
+
"sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"
|
| 274 |
+
],
|
| 275 |
+
"markers": "python_version >= '3.10'",
|
| 276 |
+
"version": "==3.4.2"
|
| 277 |
+
},
|
| 278 |
+
"numpy": {
|
| 279 |
+
"hashes": [
|
| 280 |
+
"sha256:05b2d4e667895cc55e3ff2b56077e4c8a5604361fc21a042845ea3ad67465aa8",
|
| 281 |
+
"sha256:12edb90831ff481f7ef5f6bc6431a9d74dc0e5ff401559a71e5e4611d4f2d466",
|
| 282 |
+
"sha256:13311c2db4c5f7609b462bc0f43d3c465424d25c626d95040f073e30f7570e35",
|
| 283 |
+
"sha256:13532a088217fa624c99b843eeb54640de23b3414b14aa66d023805eb731066c",
|
| 284 |
+
"sha256:13602b3174432a35b16c4cfb5de9a12d229727c3dd47a6ce35111f2ebdf66ff4",
|
| 285 |
+
"sha256:1600068c262af1ca9580a527d43dc9d959b0b1d8e56f8a05d830eea39b7c8af6",
|
| 286 |
+
"sha256:1b8cde4f11f0a975d1fd59373b32e2f5a562ade7cde4f85b7137f3de8fbb29a0",
|
| 287 |
+
"sha256:1c193d0b0238638e6fc5f10f1b074a6993cb13b0b431f64079a509d63d3aa8b7",
|
| 288 |
+
"sha256:1ebec5fd716c5a5b3d8dfcc439be82a8407b7b24b230d0ad28a81b61c2f4659a",
|
| 289 |
+
"sha256:242b39d00e4944431a3cd2db2f5377e15b5785920421993770cddb89992c3f3a",
|
| 290 |
+
"sha256:259ec80d54999cc34cd1eb8ded513cb053c3bf4829152a2e00de2371bd406f5e",
|
| 291 |
+
"sha256:2abbf905a0b568706391ec6fa15161fad0fb5d8b68d73c461b3c1bab6064dd62",
|
| 292 |
+
"sha256:2cbba4b30bf31ddbe97f1c7205ef976909a93a66bb1583e983adbd155ba72ac2",
|
| 293 |
+
"sha256:2ffef621c14ebb0188a8633348504a35c13680d6da93ab5cb86f4e54b7e922b5",
|
| 294 |
+
"sha256:30d53720b726ec36a7f88dc873f0eec8447fbc93d93a8f079dfac2629598d6ee",
|
| 295 |
+
"sha256:32e16a03138cabe0cb28e1007ee82264296ac0983714094380b408097a418cfe",
|
| 296 |
+
"sha256:43cca367bf94a14aca50b89e9bc2061683116cfe864e56740e083392f533ce7a",
|
| 297 |
+
"sha256:456e3b11cb79ac9946c822a56346ec80275eaf2950314b249b512896c0d2505e",
|
| 298 |
+
"sha256:4d6ec0d4222e8ffdab1744da2560f07856421b367928026fb540e1945f2eeeaf",
|
| 299 |
+
"sha256:5006b13a06e0b38d561fab5ccc37581f23c9511879be7693bd33c7cd15ca227c",
|
| 300 |
+
"sha256:675c741d4739af2dc20cd6c6a5c4b7355c728167845e3c6b0e824e4e5d36a6c3",
|
| 301 |
+
"sha256:6cdb606a7478f9ad91c6283e238544451e3a95f30fb5467fbf715964341a8a86",
|
| 302 |
+
"sha256:6d95f286b8244b3649b477ac066c6906fbb2905f8ac19b170e2175d3d799f4df",
|
| 303 |
+
"sha256:76322dcdb16fccf2ac56f99048af32259dcc488d9b7e25b51e5eca5147a3fb98",
|
| 304 |
+
"sha256:7c1c60328bd964b53f8b835df69ae8198659e2b9302ff9ebb7de4e5a5994db3d",
|
| 305 |
+
"sha256:860ec6e63e2c5c2ee5e9121808145c7bf86c96cca9ad396c0bd3e0f2798ccbe2",
|
| 306 |
+
"sha256:8e00ea6fc82e8a804433d3e9cedaa1051a1422cb6e443011590c14d2dea59146",
|
| 307 |
+
"sha256:9c6c754df29ce6a89ed23afb25550d1c2d5fdb9901d9c67a16e0b16eaf7e2550",
|
| 308 |
+
"sha256:a26ae94658d3ba3781d5e103ac07a876b3e9b29db53f68ed7df432fd033358a8",
|
| 309 |
+
"sha256:a65acfdb9c6ebb8368490dbafe83c03c7e277b37e6857f0caeadbbc56e12f4fb",
|
| 310 |
+
"sha256:a7d80b2e904faa63068ead63107189164ca443b42dd1930299e0d1cb041cec2e",
|
| 311 |
+
"sha256:a84498e0d0a1174f2b3ed769b67b656aa5460c92c9554039e11f20a05650f00d",
|
| 312 |
+
"sha256:ab4754d432e3ac42d33a269c8567413bdb541689b02d93788af4131018cbf366",
|
| 313 |
+
"sha256:ad369ed238b1959dfbade9018a740fb9392c5ac4f9b5173f420bd4f37ba1f7a0",
|
| 314 |
+
"sha256:b1d0fcae4f0949f215d4632be684a539859b295e2d0cb14f78ec231915d644db",
|
| 315 |
+
"sha256:b42a1a511c81cc78cbc4539675713bbcf9d9c3913386243ceff0e9429ca892fe",
|
| 316 |
+
"sha256:bd33f82e95ba7ad632bc57837ee99dba3d7e006536200c4e9124089e1bf42426",
|
| 317 |
+
"sha256:bdd407c40483463898b84490770199d5714dcc9dd9b792f6c6caccc523c00952",
|
| 318 |
+
"sha256:c6eef7a2dbd0abfb0d9eaf78b73017dbfd0b54051102ff4e6a7b2980d5ac1a03",
|
| 319 |
+
"sha256:c82af4b2ddd2ee72d1fc0c6695048d457e00b3582ccde72d8a1c991b808bb20f",
|
| 320 |
+
"sha256:d666cb72687559689e9906197e3bec7b736764df6a2e58ee265e360663e9baf7",
|
| 321 |
+
"sha256:d7bf0a4f9f15b32b5ba53147369e94296f5fffb783db5aacc1be15b4bf72f43b",
|
| 322 |
+
"sha256:d82075752f40c0ddf57e6e02673a17f6cb0f8eb3f587f63ca1eaab5594da5b17",
|
| 323 |
+
"sha256:da65fb46d4cbb75cb417cddf6ba5e7582eb7bb0b47db4b99c9fe5787ce5d91f5",
|
| 324 |
+
"sha256:e2b49c3c0804e8ecb05d59af8386ec2f74877f7ca8fd9c1e00be2672e4d399b1",
|
| 325 |
+
"sha256:e585c8ae871fd38ac50598f4763d73ec5497b0de9a0ab4ef5b69f01c6a046142",
|
| 326 |
+
"sha256:e8d3ca0a72dd8846eb6f7dfe8f19088060fcb76931ed592d29128e0219652884",
|
| 327 |
+
"sha256:ef444c57d664d35cac4e18c298c47d7b504c66b17c2ea91312e979fcfbdfb08a",
|
| 328 |
+
"sha256:f1eb068ead09f4994dec71c24b2844f1e4e4e013b9629f812f292f04bd1510d9",
|
| 329 |
+
"sha256:f2ded8d9b6f68cc26f8425eda5d3877b47343e68ca23d0d0846f4d312ecaa445",
|
| 330 |
+
"sha256:f751ed0a2f250541e19dfca9f1eafa31a392c71c832b6bb9e113b10d050cb0f1",
|
| 331 |
+
"sha256:faa88bc527d0f097abdc2c663cddf37c05a1c2f113716601555249805cf573f1",
|
| 332 |
+
"sha256:fc44e3c68ff00fd991b59092a54350e6e4911152682b4782f68070985aa9e648"
|
| 333 |
+
],
|
| 334 |
+
"index": "pypi",
|
| 335 |
+
"markers": "python_version >= '3.10'",
|
| 336 |
+
"version": "==2.1.2"
|
| 337 |
+
},
|
| 338 |
+
"packaging": {
|
| 339 |
+
"hashes": [
|
| 340 |
+
"sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002",
|
| 341 |
+
"sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"
|
| 342 |
+
],
|
| 343 |
+
"markers": "python_version >= '3.8'",
|
| 344 |
+
"version": "==24.1"
|
| 345 |
+
},
|
| 346 |
+
"pandas": {
|
| 347 |
+
"hashes": [
|
| 348 |
+
"sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a",
|
| 349 |
+
"sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d",
|
| 350 |
+
"sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5",
|
| 351 |
+
"sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4",
|
| 352 |
+
"sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0",
|
| 353 |
+
"sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32",
|
| 354 |
+
"sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea",
|
| 355 |
+
"sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28",
|
| 356 |
+
"sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f",
|
| 357 |
+
"sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348",
|
| 358 |
+
"sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18",
|
| 359 |
+
"sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468",
|
| 360 |
+
"sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5",
|
| 361 |
+
"sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e",
|
| 362 |
+
"sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667",
|
| 363 |
+
"sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645",
|
| 364 |
+
"sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13",
|
| 365 |
+
"sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30",
|
| 366 |
+
"sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3",
|
| 367 |
+
"sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d",
|
| 368 |
+
"sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb",
|
| 369 |
+
"sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3",
|
| 370 |
+
"sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039",
|
| 371 |
+
"sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8",
|
| 372 |
+
"sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd",
|
| 373 |
+
"sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761",
|
| 374 |
+
"sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659",
|
| 375 |
+
"sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57",
|
| 376 |
+
"sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c",
|
| 377 |
+
"sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c",
|
| 378 |
+
"sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4",
|
| 379 |
+
"sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a",
|
| 380 |
+
"sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9",
|
| 381 |
+
"sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42",
|
| 382 |
+
"sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2",
|
| 383 |
+
"sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39",
|
| 384 |
+
"sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc",
|
| 385 |
+
"sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698",
|
| 386 |
+
"sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed",
|
| 387 |
+
"sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015",
|
| 388 |
+
"sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24",
|
| 389 |
+
"sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"
|
| 390 |
+
],
|
| 391 |
+
"index": "pypi",
|
| 392 |
+
"markers": "python_version >= '3.9'",
|
| 393 |
+
"version": "==2.2.3"
|
| 394 |
+
},
|
| 395 |
+
"pillow": {
|
| 396 |
+
"hashes": [
|
| 397 |
+
"sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7",
|
| 398 |
+
"sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5",
|
| 399 |
+
"sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903",
|
| 400 |
+
"sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2",
|
| 401 |
+
"sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38",
|
| 402 |
+
"sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2",
|
| 403 |
+
"sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9",
|
| 404 |
+
"sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f",
|
| 405 |
+
"sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc",
|
| 406 |
+
"sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8",
|
| 407 |
+
"sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d",
|
| 408 |
+
"sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2",
|
| 409 |
+
"sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316",
|
| 410 |
+
"sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a",
|
| 411 |
+
"sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25",
|
| 412 |
+
"sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd",
|
| 413 |
+
"sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba",
|
| 414 |
+
"sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc",
|
| 415 |
+
"sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273",
|
| 416 |
+
"sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa",
|
| 417 |
+
"sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a",
|
| 418 |
+
"sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b",
|
| 419 |
+
"sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a",
|
| 420 |
+
"sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae",
|
| 421 |
+
"sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291",
|
| 422 |
+
"sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97",
|
| 423 |
+
"sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06",
|
| 424 |
+
"sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904",
|
| 425 |
+
"sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b",
|
| 426 |
+
"sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b",
|
| 427 |
+
"sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8",
|
| 428 |
+
"sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527",
|
| 429 |
+
"sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947",
|
| 430 |
+
"sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb",
|
| 431 |
+
"sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003",
|
| 432 |
+
"sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5",
|
| 433 |
+
"sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f",
|
| 434 |
+
"sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739",
|
| 435 |
+
"sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944",
|
| 436 |
+
"sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830",
|
| 437 |
+
"sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f",
|
| 438 |
+
"sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3",
|
| 439 |
+
"sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4",
|
| 440 |
+
"sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84",
|
| 441 |
+
"sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7",
|
| 442 |
+
"sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6",
|
| 443 |
+
"sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6",
|
| 444 |
+
"sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9",
|
| 445 |
+
"sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de",
|
| 446 |
+
"sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4",
|
| 447 |
+
"sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47",
|
| 448 |
+
"sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd",
|
| 449 |
+
"sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50",
|
| 450 |
+
"sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c",
|
| 451 |
+
"sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086",
|
| 452 |
+
"sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba",
|
| 453 |
+
"sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306",
|
| 454 |
+
"sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699",
|
| 455 |
+
"sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e",
|
| 456 |
+
"sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488",
|
| 457 |
+
"sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa",
|
| 458 |
+
"sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2",
|
| 459 |
+
"sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3",
|
| 460 |
+
"sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9",
|
| 461 |
+
"sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923",
|
| 462 |
+
"sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2",
|
| 463 |
+
"sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790",
|
| 464 |
+
"sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734",
|
| 465 |
+
"sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916",
|
| 466 |
+
"sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1",
|
| 467 |
+
"sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f",
|
| 468 |
+
"sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798",
|
| 469 |
+
"sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb",
|
| 470 |
+
"sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2",
|
| 471 |
+
"sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"
|
| 472 |
+
],
|
| 473 |
+
"index": "pypi",
|
| 474 |
+
"markers": "python_version >= '3.9'",
|
| 475 |
+
"version": "==11.0.0"
|
| 476 |
+
},
|
| 477 |
+
"python-dateutil": {
|
| 478 |
+
"hashes": [
|
| 479 |
+
"sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3",
|
| 480 |
+
"sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"
|
| 481 |
+
],
|
| 482 |
+
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
| 483 |
+
"version": "==2.9.0.post0"
|
| 484 |
+
},
|
| 485 |
+
"pytz": {
|
| 486 |
+
"hashes": [
|
| 487 |
+
"sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a",
|
| 488 |
+
"sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"
|
| 489 |
+
],
|
| 490 |
+
"version": "==2024.2"
|
| 491 |
+
},
|
| 492 |
+
"pyyaml": {
|
| 493 |
+
"hashes": [
|
| 494 |
+
"sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff",
|
| 495 |
+
"sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48",
|
| 496 |
+
"sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086",
|
| 497 |
+
"sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e",
|
| 498 |
+
"sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133",
|
| 499 |
+
"sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5",
|
| 500 |
+
"sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484",
|
| 501 |
+
"sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee",
|
| 502 |
+
"sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5",
|
| 503 |
+
"sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68",
|
| 504 |
+
"sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a",
|
| 505 |
+
"sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf",
|
| 506 |
+
"sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99",
|
| 507 |
+
"sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8",
|
| 508 |
+
"sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85",
|
| 509 |
+
"sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19",
|
| 510 |
+
"sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc",
|
| 511 |
+
"sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a",
|
| 512 |
+
"sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1",
|
| 513 |
+
"sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317",
|
| 514 |
+
"sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c",
|
| 515 |
+
"sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631",
|
| 516 |
+
"sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d",
|
| 517 |
+
"sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652",
|
| 518 |
+
"sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5",
|
| 519 |
+
"sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e",
|
| 520 |
+
"sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b",
|
| 521 |
+
"sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8",
|
| 522 |
+
"sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476",
|
| 523 |
+
"sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706",
|
| 524 |
+
"sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563",
|
| 525 |
+
"sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237",
|
| 526 |
+
"sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b",
|
| 527 |
+
"sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083",
|
| 528 |
+
"sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180",
|
| 529 |
+
"sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425",
|
| 530 |
+
"sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e",
|
| 531 |
+
"sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f",
|
| 532 |
+
"sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725",
|
| 533 |
+
"sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183",
|
| 534 |
+
"sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab",
|
| 535 |
+
"sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774",
|
| 536 |
+
"sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725",
|
| 537 |
+
"sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e",
|
| 538 |
+
"sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5",
|
| 539 |
+
"sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d",
|
| 540 |
+
"sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290",
|
| 541 |
+
"sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44",
|
| 542 |
+
"sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed",
|
| 543 |
+
"sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4",
|
| 544 |
+
"sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba",
|
| 545 |
+
"sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12",
|
| 546 |
+
"sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"
|
| 547 |
+
],
|
| 548 |
+
"markers": "python_version >= '3.8'",
|
| 549 |
+
"version": "==6.0.2"
|
| 550 |
+
},
|
| 551 |
+
"regex": {
|
| 552 |
+
"hashes": [
|
| 553 |
+
"sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623",
|
| 554 |
+
"sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199",
|
| 555 |
+
"sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664",
|
| 556 |
+
"sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f",
|
| 557 |
+
"sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca",
|
| 558 |
+
"sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066",
|
| 559 |
+
"sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca",
|
| 560 |
+
"sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39",
|
| 561 |
+
"sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d",
|
| 562 |
+
"sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6",
|
| 563 |
+
"sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35",
|
| 564 |
+
"sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408",
|
| 565 |
+
"sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5",
|
| 566 |
+
"sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a",
|
| 567 |
+
"sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9",
|
| 568 |
+
"sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92",
|
| 569 |
+
"sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766",
|
| 570 |
+
"sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168",
|
| 571 |
+
"sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca",
|
| 572 |
+
"sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508",
|
| 573 |
+
"sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df",
|
| 574 |
+
"sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf",
|
| 575 |
+
"sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b",
|
| 576 |
+
"sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4",
|
| 577 |
+
"sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268",
|
| 578 |
+
"sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6",
|
| 579 |
+
"sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c",
|
| 580 |
+
"sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62",
|
| 581 |
+
"sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231",
|
| 582 |
+
"sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36",
|
| 583 |
+
"sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba",
|
| 584 |
+
"sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4",
|
| 585 |
+
"sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e",
|
| 586 |
+
"sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822",
|
| 587 |
+
"sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4",
|
| 588 |
+
"sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d",
|
| 589 |
+
"sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71",
|
| 590 |
+
"sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50",
|
| 591 |
+
"sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d",
|
| 592 |
+
"sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad",
|
| 593 |
+
"sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8",
|
| 594 |
+
"sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8",
|
| 595 |
+
"sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8",
|
| 596 |
+
"sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd",
|
| 597 |
+
"sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16",
|
| 598 |
+
"sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664",
|
| 599 |
+
"sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a",
|
| 600 |
+
"sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f",
|
| 601 |
+
"sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd",
|
| 602 |
+
"sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a",
|
| 603 |
+
"sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9",
|
| 604 |
+
"sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199",
|
| 605 |
+
"sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d",
|
| 606 |
+
"sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963",
|
| 607 |
+
"sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009",
|
| 608 |
+
"sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a",
|
| 609 |
+
"sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679",
|
| 610 |
+
"sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96",
|
| 611 |
+
"sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42",
|
| 612 |
+
"sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8",
|
| 613 |
+
"sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e",
|
| 614 |
+
"sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7",
|
| 615 |
+
"sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8",
|
| 616 |
+
"sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802",
|
| 617 |
+
"sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366",
|
| 618 |
+
"sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137",
|
| 619 |
+
"sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784",
|
| 620 |
+
"sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29",
|
| 621 |
+
"sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3",
|
| 622 |
+
"sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771",
|
| 623 |
+
"sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60",
|
| 624 |
+
"sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a",
|
| 625 |
+
"sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4",
|
| 626 |
+
"sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0",
|
| 627 |
+
"sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84",
|
| 628 |
+
"sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd",
|
| 629 |
+
"sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1",
|
| 630 |
+
"sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776",
|
| 631 |
+
"sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142",
|
| 632 |
+
"sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89",
|
| 633 |
+
"sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c",
|
| 634 |
+
"sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8",
|
| 635 |
+
"sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35",
|
| 636 |
+
"sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a",
|
| 637 |
+
"sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86",
|
| 638 |
+
"sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9",
|
| 639 |
+
"sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64",
|
| 640 |
+
"sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554",
|
| 641 |
+
"sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85",
|
| 642 |
+
"sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb",
|
| 643 |
+
"sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0",
|
| 644 |
+
"sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8",
|
| 645 |
+
"sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb",
|
| 646 |
+
"sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"
|
| 647 |
+
],
|
| 648 |
+
"markers": "python_version >= '3.8'",
|
| 649 |
+
"version": "==2024.9.11"
|
| 650 |
+
},
|
| 651 |
+
"requests": {
|
| 652 |
+
"hashes": [
|
| 653 |
+
"sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760",
|
| 654 |
+
"sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"
|
| 655 |
+
],
|
| 656 |
+
"index": "pypi",
|
| 657 |
+
"markers": "python_version >= '3.8'",
|
| 658 |
+
"version": "==2.32.3"
|
| 659 |
+
},
|
| 660 |
+
"safetensors": {
|
| 661 |
+
"hashes": [
|
| 662 |
+
"sha256:01c8f00da537af711979e1b42a69a8ec9e1d7112f208e0e9b8a35d2c381085ef",
|
| 663 |
+
"sha256:023b6e5facda76989f4cba95a861b7e656b87e225f61811065d5c501f78cdb3f",
|
| 664 |
+
"sha256:09566792588d77b68abe53754c9f1308fadd35c9f87be939e22c623eaacbed6b",
|
| 665 |
+
"sha256:098923e2574ff237c517d6e840acada8e5b311cb1fa226019105ed82e9c3b62f",
|
| 666 |
+
"sha256:09dedf7c2fda934ee68143202acff6e9e8eb0ddeeb4cfc24182bef999efa9f42",
|
| 667 |
+
"sha256:133620f443450429322f238fda74d512c4008621227fccf2f8cf4a76206fea7c",
|
| 668 |
+
"sha256:139fbee92570ecea774e6344fee908907db79646d00b12c535f66bc78bd5ea2c",
|
| 669 |
+
"sha256:13ca0902d2648775089fa6a0c8fc9e6390c5f8ee576517d33f9261656f851e3f",
|
| 670 |
+
"sha256:1500418454529d0ed5c1564bda376c4ddff43f30fce9517d9bee7bcce5a8ef50",
|
| 671 |
+
"sha256:1524b54246e422ad6fb6aea1ac71edeeb77666efa67230e1faf6999df9b2e27f",
|
| 672 |
+
"sha256:21742b391b859e67b26c0b2ac37f52c9c0944a879a25ad2f9f9f3cd61e7fda8f",
|
| 673 |
+
"sha256:21f848d7aebd5954f92538552d6d75f7c1b4500f51664078b5b49720d180e47c",
|
| 674 |
+
"sha256:23fc9b4ec7b602915cbb4ec1a7c1ad96d2743c322f20ab709e2c35d1b66dad27",
|
| 675 |
+
"sha256:25e5f8e2e92a74f05b4ca55686234c32aac19927903792b30ee6d7bd5653d54e",
|
| 676 |
+
"sha256:2783956926303dcfeb1de91a4d1204cd4089ab441e622e7caee0642281109db3",
|
| 677 |
+
"sha256:309aaec9b66cbf07ad3a2e5cb8a03205663324fea024ba391594423d0f00d9fe",
|
| 678 |
+
"sha256:313514b0b9b73ff4ddfb4edd71860696dbe3c1c9dc4d5cc13dbd74da283d2cbf",
|
| 679 |
+
"sha256:31fa33ee326f750a2f2134a6174773c281d9a266ccd000bd4686d8021f1f3dac",
|
| 680 |
+
"sha256:3685ce7ed036f916316b567152482b7e959dc754fcc4a8342333d222e05f407c",
|
| 681 |
+
"sha256:39371fc551c1072976073ab258c3119395294cf49cdc1f8476794627de3130df",
|
| 682 |
+
"sha256:3a6ba28118636a130ccbb968bc33d4684c48678695dba2590169d5ab03a45646",
|
| 683 |
+
"sha256:4037676c86365a721a8c9510323a51861d703b399b78a6b4486a54a65a975fca",
|
| 684 |
+
"sha256:473300314e026bd1043cef391bb16a8689453363381561b8a3e443870937cc1e",
|
| 685 |
+
"sha256:4b99fbf72e3faf0b2f5f16e5e3458b93b7d0a83984fe8d5364c60aa169f2da89",
|
| 686 |
+
"sha256:4fb3e0609ec12d2a77e882f07cced530b8262027f64b75d399f1504ffec0ba56",
|
| 687 |
+
"sha256:500cac01d50b301ab7bb192353317035011c5ceeef0fca652f9f43c000bb7f8d",
|
| 688 |
+
"sha256:52452fa5999dc50c4decaf0c53aa28371f7f1e0fe5c2dd9129059fbe1e1599c7",
|
| 689 |
+
"sha256:53946c5813b8f9e26103c5efff4a931cc45d874f45229edd68557ffb35ffb9f8",
|
| 690 |
+
"sha256:540ce6c4bf6b58cb0fd93fa5f143bc0ee341c93bb4f9287ccd92cf898cc1b0dd",
|
| 691 |
+
"sha256:585f1703a518b437f5103aa9cf70e9bd437cb78eea9c51024329e4fb8a3e3679",
|
| 692 |
+
"sha256:59b77e4b7a708988d84f26de3ebead61ef1659c73dcbc9946c18f3b1786d2688",
|
| 693 |
+
"sha256:5a2d68a523a4cefd791156a4174189a4114cf0bf9c50ceb89f261600f3b2b81a",
|
| 694 |
+
"sha256:5d3bc83e14d67adc2e9387e511097f254bd1b43c3020440e708858c684cbac68",
|
| 695 |
+
"sha256:5f0032bedc869c56f8d26259fe39cd21c5199cd57f2228d817a0e23e8370af25",
|
| 696 |
+
"sha256:60c828a27e852ded2c85fc0f87bf1ec20e464c5cd4d56ff0e0711855cc2e17f8",
|
| 697 |
+
"sha256:63bfd425e25f5c733f572e2246e08a1c38bd6f2e027d3f7c87e2e43f228d1345",
|
| 698 |
+
"sha256:65573dc35be9059770808e276b017256fa30058802c29e1038eb1c00028502ea",
|
| 699 |
+
"sha256:670e95fe34e0d591d0529e5e59fd9d3d72bc77b1444fcaa14dccda4f36b5a38b",
|
| 700 |
+
"sha256:67e1e7cb8678bb1b37ac48ec0df04faf689e2f4e9e81e566b5c63d9f23748523",
|
| 701 |
+
"sha256:68814d599d25ed2fdd045ed54d370d1d03cf35e02dce56de44c651f828fb9b7b",
|
| 702 |
+
"sha256:6885016f34bef80ea1085b7e99b3c1f92cb1be78a49839203060f67b40aee761",
|
| 703 |
+
"sha256:6ac85d9a8c1af0e3132371d9f2d134695a06a96993c2e2f0bbe25debb9e3f67a",
|
| 704 |
+
"sha256:6d3de65718b86c3eeaa8b73a9c3d123f9307a96bbd7be9698e21e76a56443af5",
|
| 705 |
+
"sha256:7389129c03fadd1ccc37fd1ebbc773f2b031483b04700923c3511d2a939252cc",
|
| 706 |
+
"sha256:73e7d408e9012cd17511b382b43547850969c7979efc2bc353f317abaf23c84c",
|
| 707 |
+
"sha256:7469d70d3de970b1698d47c11ebbf296a308702cbaae7fcb993944751cf985f4",
|
| 708 |
+
"sha256:75331c0c746f03158ded32465b7d0b0e24c5a22121743662a2393439c43a45cf",
|
| 709 |
+
"sha256:76ded72f69209c9780fdb23ea89e56d35c54ae6abcdec67ccb22af8e696e449a",
|
| 710 |
+
"sha256:775409ce0fcc58b10773fdb4221ed1eb007de10fe7adbdf8f5e8a56096b6f0bc",
|
| 711 |
+
"sha256:77d9b228da8374c7262046a36c1f656ba32a93df6cc51cd4453af932011e77f1",
|
| 712 |
+
"sha256:788ee7d04cc0e0e7f944c52ff05f52a4415b312f5efd2ee66389fb7685ee030c",
|
| 713 |
+
"sha256:78dd8adfb48716233c45f676d6e48534d34b4bceb50162c13d1f0bdf6f78590a",
|
| 714 |
+
"sha256:801183a0f76dc647f51a2d9141ad341f9665602a7899a693207a82fb102cc53e",
|
| 715 |
+
"sha256:8158938cf3324172df024da511839d373c40fbfaa83e9abf467174b2910d7b4c",
|
| 716 |
+
"sha256:81efb124b58af39fcd684254c645e35692fea81c51627259cdf6d67ff4458916",
|
| 717 |
+
"sha256:834001bed193e4440c4a3950a31059523ee5090605c907c66808664c932b549c",
|
| 718 |
+
"sha256:83c4f13a9e687335c3928f615cd63a37e3f8ef072a3f2a0599fa09f863fb06a2",
|
| 719 |
+
"sha256:868f9df9e99ad1e7f38c52194063a982bc88fedc7d05096f4f8160403aaf4bd6",
|
| 720 |
+
"sha256:87bc42bd04fd9ca31396d3ca0433db0be1411b6b53ac5a32b7845a85d01ffc2e",
|
| 721 |
+
"sha256:8e8deb16c4321d61ae72533b8451ec4a9af8656d1c61ff81aa49f966406e4b68",
|
| 722 |
+
"sha256:9483f42be3b6bc8ff77dd67302de8ae411c4db39f7224dec66b0eb95822e4163",
|
| 723 |
+
"sha256:951d2fcf1817f4fb0ef0b48f6696688a4e852a95922a042b3f96aaa67eedc920",
|
| 724 |
+
"sha256:9633b663393d5796f0b60249549371e392b75a0b955c07e9c6f8708a87fc841f",
|
| 725 |
+
"sha256:96f1d038c827cdc552d97e71f522e1049fef0542be575421f7684756a748e457",
|
| 726 |
+
"sha256:9cc9449bd0b0bc538bd5e268221f0c5590bc5c14c1934a6ae359d44410dc68c4",
|
| 727 |
+
"sha256:9d1a94b9d793ed8fe35ab6d5cea28d540a46559bafc6aae98f30ee0867000cab",
|
| 728 |
+
"sha256:9e347d77e2c77eb7624400ccd09bed69d35c0332f417ce8c048d404a096c593b",
|
| 729 |
+
"sha256:9f556eea3aec1d3d955403159fe2123ddd68e880f83954ee9b4a3f2e15e716b6",
|
| 730 |
+
"sha256:a01e232e6d3d5cf8b1667bc3b657a77bdab73f0743c26c1d3c5dd7ce86bd3a92",
|
| 731 |
+
"sha256:a0dd565f83b30f2ca79b5d35748d0d99dd4b3454f80e03dfb41f0038e3bdf180",
|
| 732 |
+
"sha256:a3a315a6d0054bc6889a17f5668a73f94f7fe55121ff59e0a199e3519c08565f",
|
| 733 |
+
"sha256:a63eaccd22243c67e4f2b1c3e258b257effc4acd78f3b9d397edc8cf8f1298a7",
|
| 734 |
+
"sha256:a659467495de201e2f282063808a41170448c78bada1e62707b07a27b05e6943",
|
| 735 |
+
"sha256:a6c19feda32b931cae0acd42748a670bdf56bee6476a046af20181ad3fee4090",
|
| 736 |
+
"sha256:adaa9c6dead67e2dd90d634f89131e43162012479d86e25618e821a03d1eb1dc",
|
| 737 |
+
"sha256:b17b299ca9966ca983ecda1c0791a3f07f9ca6ab5ded8ef3d283fff45f6bcd5f",
|
| 738 |
+
"sha256:b3139098e3e8b2ad7afbca96d30ad29157b50c90861084e69fcb80dec7430461",
|
| 739 |
+
"sha256:b4db6a61d968de73722b858038c616a1bebd4a86abe2688e46ca0cc2d17558f2",
|
| 740 |
+
"sha256:b5a8810ad6a6f933fff6c276eae92c1da217b39b4d8b1bc1c0b8af2d270dc532",
|
| 741 |
+
"sha256:b75a616e02f21b6f1d5785b20cecbab5e2bd3f6358a90e8925b813d557666ec1",
|
| 742 |
+
"sha256:b98d40a2ffa560653f6274e15b27b3544e8e3713a44627ce268f419f35c49478",
|
| 743 |
+
"sha256:bad5e4b2476949bcd638a89f71b6916fa9a5cae5c1ae7eede337aca2100435c0",
|
| 744 |
+
"sha256:bb07000b19d41e35eecef9a454f31a8b4718a185293f0d0b1c4b61d6e4487971",
|
| 745 |
+
"sha256:bfeaa1a699c6b9ed514bd15e6a91e74738b71125a9292159e3d6b7f0a53d2cde",
|
| 746 |
+
"sha256:c36302c1c69eebb383775a89645a32b9d266878fab619819ce660309d6176c9b",
|
| 747 |
+
"sha256:c6d156bdb26732feada84f9388a9f135528c1ef5b05fae153da365ad4319c4c5",
|
| 748 |
+
"sha256:c7db3006a4915151ce1913652e907cdede299b974641a83fbc092102ac41b644",
|
| 749 |
+
"sha256:c859c7ed90b0047f58ee27751c8e56951452ed36a67afee1b0a87847d065eec6",
|
| 750 |
+
"sha256:cbd39cae1ad3e3ef6f63a6f07296b080c951f24cec60188378e43d3713000c04",
|
| 751 |
+
"sha256:cf727bb1281d66699bef5683b04d98c894a2803442c490a8d45cd365abfbdeb2",
|
| 752 |
+
"sha256:d0f1dd769f064adc33831f5e97ad07babbd728427f98e3e1db6902e369122737",
|
| 753 |
+
"sha256:d42ffd4c2259f31832cb17ff866c111684c87bd930892a1ba53fed28370c918c",
|
| 754 |
+
"sha256:d5f23198821e227cfc52d50fa989813513db381255c6d100927b012f0cfec63d",
|
| 755 |
+
"sha256:d641f5b8149ea98deb5ffcf604d764aad1de38a8285f86771ce1abf8e74c4891",
|
| 756 |
+
"sha256:d73de19682deabb02524b3d5d1f8b3aaba94c72f1bbfc7911b9b9d5d391c0310",
|
| 757 |
+
"sha256:d94581aab8c6b204def4d7320f07534d6ee34cd4855688004a4354e63b639a35",
|
| 758 |
+
"sha256:dbd280b07e6054ea68b0cb4b16ad9703e7d63cd6890f577cb98acc5354780142",
|
| 759 |
+
"sha256:dd8a1f6d2063a92cd04145c7fd9e31a1c7d85fbec20113a14b487563fdbc0597",
|
| 760 |
+
"sha256:dde2bf390d25f67908278d6f5d59e46211ef98e44108727084d4637ee70ab4f1",
|
| 761 |
+
"sha256:e3cec4a29eb7fe8da0b1c7988bc3828183080439dd559f720414450de076fcab",
|
| 762 |
+
"sha256:e7a97058f96340850da0601a3309f3d29d6191b0702b2da201e54c6e3e44ccf0",
|
| 763 |
+
"sha256:e98ef5524f8b6620c8cdef97220c0b6a5c1cef69852fcd2f174bb96c2bb316b1",
|
| 764 |
+
"sha256:f0b6453c54c57c1781292c46593f8a37254b8b99004c68d6c3ce229688931a22",
|
| 765 |
+
"sha256:f3664ac565d0e809b0b929dae7ccd74e4d3273cd0c6d1220c6430035befb678e",
|
| 766 |
+
"sha256:f4b15f51b4f8f2a512341d9ce3475cacc19c5fdfc5db1f0e19449e75f95c7dc8",
|
| 767 |
+
"sha256:f4beb84b6073b1247a773141a6331117e35d07134b3bb0383003f39971d414bb",
|
| 768 |
+
"sha256:f6594d130d0ad933d885c6a7b75c5183cb0e8450f799b80a39eae2b8508955eb",
|
| 769 |
+
"sha256:f68bf99ea970960a237f416ea394e266e0361895753df06e3e06e6ea7907d98b",
|
| 770 |
+
"sha256:fd33da8e9407559f8779c82a0448e2133737f922d71f884da27184549416bfed",
|
| 771 |
+
"sha256:fdadf66b5a22ceb645d5435a0be7a0292ce59648ca1d46b352f13cff3ea80410"
|
| 772 |
+
],
|
| 773 |
+
"markers": "python_version >= '3.7'",
|
| 774 |
+
"version": "==0.4.5"
|
| 775 |
+
},
|
| 776 |
+
"setuptools": {
|
| 777 |
+
"hashes": [
|
| 778 |
+
"sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec",
|
| 779 |
+
"sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"
|
| 780 |
+
],
|
| 781 |
+
"markers": "python_version >= '3.12'",
|
| 782 |
+
"version": "==75.2.0"
|
| 783 |
+
},
|
| 784 |
+
"six": {
|
| 785 |
+
"hashes": [
|
| 786 |
+
"sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
|
| 787 |
+
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
|
| 788 |
+
],
|
| 789 |
+
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
| 790 |
+
"version": "==1.16.0"
|
| 791 |
+
},
|
| 792 |
+
"sympy": {
|
| 793 |
+
"hashes": [
|
| 794 |
+
"sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f",
|
| 795 |
+
"sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8"
|
| 796 |
+
],
|
| 797 |
+
"markers": "python_version >= '3.9'",
|
| 798 |
+
"version": "==1.13.1"
|
| 799 |
+
},
|
| 800 |
+
"tokenizers": {
|
| 801 |
+
"hashes": [
|
| 802 |
+
"sha256:02e18da58cf115b7c40de973609c35bde95856012ba42a41ee919c77935af251",
|
| 803 |
+
"sha256:03b03cf8b9a32254b1bf8a305fb95c6daf1baae0c1f93b27f2b08c9759f41dee",
|
| 804 |
+
"sha256:03dae629d99068b1ea5416d50de0fea13008f04129cc79af77a2a6392792d93c",
|
| 805 |
+
"sha256:05e41e302c315bd2ed86c02e917bf03a6cf7d2f652c9cee1a0eb0d0f1ca0d32c",
|
| 806 |
+
"sha256:07c4b7be58da142b0730cc4e5fd66bb7bf6f57f4986ddda73833cd39efef8a01",
|
| 807 |
+
"sha256:08aaa0d72bb65058e8c4b0455f61b840b156c557e2aca57627056624c3a93976",
|
| 808 |
+
"sha256:094663dd0e85ee2e573126918747bdb40044a848fde388efb5b09d57bc74c680",
|
| 809 |
+
"sha256:0b4872647ea6f25224e2833b044b0b19084e39400e8ead3cfe751238b0802140",
|
| 810 |
+
"sha256:0b7f515c83397e73292accdbbbedc62264e070bae9682f06061e2ddce67cacaf",
|
| 811 |
+
"sha256:0c6a796ddcd9a19ad13cf146997cd5895a421fe6aec8fd970d69f9117bddb45c",
|
| 812 |
+
"sha256:0d3caf244ce89d24c87545aafc3448be15870096e796c703a0d68547187192e1",
|
| 813 |
+
"sha256:0ecaf7b0e39caeb1aa6dd6e0975c405716c82c1312b55ac4f716ef563a906969",
|
| 814 |
+
"sha256:10be14ebd8082086a342d969e17fc2d6edc856c59dbdbddd25f158fa40eaf043",
|
| 815 |
+
"sha256:128c1110e950534426e2274837fc06b118ab5f2fa61c3436e60e0aada0ccfd67",
|
| 816 |
+
"sha256:130e35e76f9337ed6c31be386e75d4925ea807055acf18ca1a9b0eec03d8fe23",
|
| 817 |
+
"sha256:14e4cf033a2aa207d7ac790e91adca598b679999710a632c4a494aab0fc3a1b2",
|
| 818 |
+
"sha256:17f98fccb5c12ab1ce1f471731a9cd86df5d4bd2cf2880c5a66b229802d96145",
|
| 819 |
+
"sha256:1ba72260449e16c4c2f6f3252823b059fbf2d31b32617e582003f2b18b415c39",
|
| 820 |
+
"sha256:1fbbaf17a393c78d8aedb6a334097c91cb4119a9ced4764ab8cfdc8d254dc9f9",
|
| 821 |
+
"sha256:212231ab7dfcdc879baf4892ca87c726259fa7c887e1688e3f3cead384d8c305",
|
| 822 |
+
"sha256:218e5a3561561ea0f0ef1559c6d95b825308dbec23fb55b70b92589e7ff2e1e8",
|
| 823 |
+
"sha256:2847843c53f445e0f19ea842a4e48b89dd0db4e62ba6e1e47a2749d6ec11f50d",
|
| 824 |
+
"sha256:299c85c1d21135bc01542237979bf25c32efa0d66595dd0069ae259b97fb2dbe",
|
| 825 |
+
"sha256:2f13a2d16032ebc8bd812eb8099b035ac65887d8f0c207261472803b9633cf3e",
|
| 826 |
+
"sha256:31e87fca4f6bbf5cc67481b562147fe932f73d5602734de7dd18a8f2eee9c6dd",
|
| 827 |
+
"sha256:359eceb6a620c965988fc559cebc0a98db26713758ec4df43fb76d41486a8ed5",
|
| 828 |
+
"sha256:37d1e6f616c84fceefa7c6484a01df05caf1e207669121c66213cb5b2911d653",
|
| 829 |
+
"sha256:3d4d218573a3d8b121a1f8c801029d70444ffb6d8f129d4cca1c7b672ee4a24c",
|
| 830 |
+
"sha256:3e0305fc1ec6b1e5052d30d9c1d5c807081a7bd0cae46a33d03117082e91908c",
|
| 831 |
+
"sha256:3ea919687aa7001a8ff1ba36ac64f165c4e89035f57998fa6cedcfd877be619d",
|
| 832 |
+
"sha256:3f84dad1ff1863c648d80628b1b55353d16303431283e4efbb6ab1af56a75832",
|
| 833 |
+
"sha256:407ab666b38e02228fa785e81f7cf79ef929f104bcccf68a64525a54a93ceac9",
|
| 834 |
+
"sha256:42c097390e2f0ed0a5c5d569e6669dd4e9fff7b31c6a5ce6e9c66a61687197de",
|
| 835 |
+
"sha256:439261da7c0a5c88bda97acb284d49fbdaf67e9d3b623c0bfd107512d22787a9",
|
| 836 |
+
"sha256:47c1bcdd61e61136087459cb9e0b069ff23b5568b008265e5cbc927eae3387ce",
|
| 837 |
+
"sha256:48689da7a395df41114f516208d6550e3e905e1239cc5ad386686d9358e9cef0",
|
| 838 |
+
"sha256:4a717dcb08f2dabbf27ae4b6b20cbbb2ad7ed78ce05a829fae100ff4b3c7ff15",
|
| 839 |
+
"sha256:4b39356df4575d37f9b187bb623aab5abb7b62c8cb702867a1768002f814800c",
|
| 840 |
+
"sha256:514cf279b22fa1ae0bc08e143458c74ad3b56cd078b319464959685a35c53d5e",
|
| 841 |
+
"sha256:5170be9ec942f3d1d317817ced8d749b3e1202670865e4fd465e35d8c259de83",
|
| 842 |
+
"sha256:57b7a8880b208866508b06ce365dc631e7a2472a3faa24daa430d046fb56c885",
|
| 843 |
+
"sha256:5dc611e6ac0fa00a41de19c3bf6391a05ea201d2d22b757d63f5491ec0e67faa",
|
| 844 |
+
"sha256:62eb9daea2a2c06bcd8113a5824af8ef8ee7405d3a71123ba4d52c79bb3d9f1a",
|
| 845 |
+
"sha256:62f7fbd3c2c38b179556d879edae442b45f68312019c3a6013e56c3947a4e648",
|
| 846 |
+
"sha256:65f34e5b731a262dfa562820818533c38ce32a45864437f3d9c82f26c139ca7f",
|
| 847 |
+
"sha256:689b93d2e26d04da337ac407acec8b5d081d8d135e3e5066a88edd5bdb5aff89",
|
| 848 |
+
"sha256:6d3ac5c1f48358ffe20086bf065e843c0d0a9fce0d7f0f45d5f2f9fba3609ca5",
|
| 849 |
+
"sha256:712f90ea33f9bd2586b4a90d697c26d56d0a22fd3c91104c5858c4b5b6489a79",
|
| 850 |
+
"sha256:741fb22788482d09d68e73ece1495cfc6d9b29a06c37b3df90564a9cfa688e6d",
|
| 851 |
+
"sha256:7cdf379219e1e1dd432091058dab325a2e6235ebb23e0aec8d0508567c90cd01",
|
| 852 |
+
"sha256:81970b80b8ac126910295f8aab2d7ef962009ea39e0d86d304769493f69aaa1e",
|
| 853 |
+
"sha256:84edcc7cdeeee45ceedb65d518fffb77aec69311c9c8e30f77ad84da3025f002",
|
| 854 |
+
"sha256:86dcd08da163912e17b27bbaba5efdc71b4fbffb841530fdb74c5707f3c49216",
|
| 855 |
+
"sha256:88b3bc76ab4db1ab95ead623d49c95205411e26302cf9f74203e762ac7e85685",
|
| 856 |
+
"sha256:896195eb9dfdc85c8c052e29947169c1fcbe75a254c4b5792cdbd451587bce85",
|
| 857 |
+
"sha256:899152a78b095559c287b4c6d0099469573bb2055347bb8154db106651296f39",
|
| 858 |
+
"sha256:89d5c337d74ea6e5e7dc8af124cf177be843bbb9ca6e58c01f75ea103c12c8a9",
|
| 859 |
+
"sha256:9041ee665d0fa7f5c4ccf0f81f5e6b7087f797f85b143c094126fc2611fec9d0",
|
| 860 |
+
"sha256:910b96ed87316e4277b23c7bcaf667ce849c7cc379a453fa179e7e09290eeb25",
|
| 861 |
+
"sha256:929c8f3afa16a5130a81ab5079c589226273ec618949cce79b46d96e59a84f61",
|
| 862 |
+
"sha256:9300fac73ddc7e4b0330acbdda4efaabf74929a4a61e119a32a181f534a11b47",
|
| 863 |
+
"sha256:9310951c92c9fb91660de0c19a923c432f110dbfad1a2d429fbc44fa956bf64f",
|
| 864 |
+
"sha256:956f21d359ae29dd51ca5726d2c9a44ffafa041c623f5aa33749da87cfa809b9",
|
| 865 |
+
"sha256:96af92e833bd44760fb17f23f402e07a66339c1dcbe17d79a9b55bb0cc4f038e",
|
| 866 |
+
"sha256:998700177b45f70afeb206ad22c08d9e5f3a80639dae1032bf41e8cbc4dada4b",
|
| 867 |
+
"sha256:9af2dc4ee97d037bc6b05fa4429ddc87532c706316c5e11ce2f0596dfcfa77af",
|
| 868 |
+
"sha256:a12c3cebb8c92e9c35a23ab10d3852aee522f385c28d0b4fe48c0b7527d59762",
|
| 869 |
+
"sha256:a25dcb2f41a0a6aac31999e6c96a75e9152fa0127af8ece46c2f784f23b8197a",
|
| 870 |
+
"sha256:a2ffd9a8895575ac636d44500c66dffaef133823b6b25067604fa73bbc5ec09d",
|
| 871 |
+
"sha256:a647c5b7cb896d6430cf3e01b4e9a2d77f719c84cefcef825d404830c2071da2",
|
| 872 |
+
"sha256:a908c69c2897a68f412aa05ba38bfa87a02980df70f5a72fa8490479308b1f2d",
|
| 873 |
+
"sha256:b0874481aea54a178f2bccc45aa2d0c99cd3f79143a0948af6a9a21dcc49173b",
|
| 874 |
+
"sha256:b605c540753e62199bf15cf69c333e934077ef2350262af2ccada46026f83d1c",
|
| 875 |
+
"sha256:b61f561f329ffe4b28367798b89d60c4abf3f815d37413b6352bc6412a359867",
|
| 876 |
+
"sha256:b8c0fc3542cf9370bf92c932eb71bdeb33d2d4aeeb4126d9fd567b60bd04cb30",
|
| 877 |
+
"sha256:bdd67a0e3503a9a7cf8bc5a4a49cdde5fa5bada09a51e4c7e1c73900297539bd",
|
| 878 |
+
"sha256:bfdad27b0e50544f6b838895a373db6114b85112ba5c0cefadffa78d6daae563",
|
| 879 |
+
"sha256:c5ffe0d7f7bfcfa3b2585776ecf11da2e01c317027c8573c78ebcb8985279e23",
|
| 880 |
+
"sha256:cd28a8614f5c82a54ab2463554e84ad79526c5184cf4573bbac2efbbbcead457",
|
| 881 |
+
"sha256:ce6238a3311bb8e4c15b12600927d35c267b92a52c881ef5717a900ca14793f7",
|
| 882 |
+
"sha256:d10766473954397e2d370f215ebed1cc46dcf6fd3906a2a116aa1d6219bfedc3",
|
| 883 |
+
"sha256:d388d1ea8b7447da784e32e3b86a75cce55887e3b22b31c19d0b186b1c677800",
|
| 884 |
+
"sha256:d412a74cf5b3f68a90c615611a5aa4478bb303d1c65961d22db45001df68afcb",
|
| 885 |
+
"sha256:da1001aa46f4490099c82e2facc4fbc06a6a32bf7de3918ba798010954b775e0",
|
| 886 |
+
"sha256:de291633fb9303555793cc544d4a86e858da529b7d0b752bcaf721ae1d74b2c9",
|
| 887 |
+
"sha256:e2e2d47a819d2954f2c1cd0ad51bb58ffac6f53a872d5d82d65d79bf76b9896d",
|
| 888 |
+
"sha256:e53975a6694428a0586534cc1354b2408d4e010a3103117f617cbb550299797c",
|
| 889 |
+
"sha256:e7edb8ec12c100d5458d15b1e47c0eb30ad606a05641f19af7563bc3d1608c14",
|
| 890 |
+
"sha256:e96f6c14c9752bb82145636b614d5a78e9cde95edfbe0a85dad0dd5ddd6ec95c",
|
| 891 |
+
"sha256:e98eee4dca22849fbb56a80acaa899eec5b72055d79637dd6aa15d5e4b8628c9",
|
| 892 |
+
"sha256:ebe63e31f9c1a970c53866d814e35ec2ec26fda03097c486f82f3891cee60830",
|
| 893 |
+
"sha256:ec870fce1ee5248a10be69f7a8408a234d6f2109f8ea827b4f7ecdbf08c9fd15",
|
| 894 |
+
"sha256:ee86d4095d3542d73579e953c2e5e07d9321af2ffea6ecc097d16d538a2dea16",
|
| 895 |
+
"sha256:ef3f1ae08fa9aea5891cbd69df29913e11d3841798e0bfb1ff78b78e4e7ea0a4",
|
| 896 |
+
"sha256:f22dee205329a636148c325921c73cf3e412e87d31f4d9c3153b302a0200057b",
|
| 897 |
+
"sha256:f326a1ac51ae909b9760e34671c26cd0dfe15662f447302a9d5bb2d872bab8ab",
|
| 898 |
+
"sha256:f40df5e0294a95131cc5f0e0eb91fe86d88837abfbee46b9b3610b09860195a7",
|
| 899 |
+
"sha256:f861889707b54a9ab1204030b65fd6c22bdd4a95205deec7994dc22a8baa2ea4",
|
| 900 |
+
"sha256:f9aa93eacd865f2798b9e62f7ce4533cfff4f5fbd50c02926a78e81c74e432cd",
|
| 901 |
+
"sha256:fc9e95ad49c932b80abfbfeaf63b155761e695ad9f8a58c52a47d962d76e310f"
|
| 902 |
+
],
|
| 903 |
+
"markers": "python_version >= '3.7'",
|
| 904 |
+
"version": "==0.20.1"
|
| 905 |
+
},
|
| 906 |
+
"torch": {
|
| 907 |
+
"hashes": [
|
| 908 |
+
"sha256:03e53f577a96e4d41aca472da8faa40e55df89d2273664af390ce1f570e885bd",
|
| 909 |
+
"sha256:15fbc95e38d330e5b0ef1593b7bc0a19f30e5bdad76895a5cffa1a6a044235e9",
|
| 910 |
+
"sha256:2dd40c885a05ef7fe29356cca81be1435a893096ceb984441d6e2c27aff8c6f4",
|
| 911 |
+
"sha256:38c21ff1bd39f076d72ab06e3c88c2ea6874f2e6f235c9450816b6c8e7627094",
|
| 912 |
+
"sha256:499a68a756d3b30d10f7e0f6214dc3767b130b797265db3b1c02e9094e2a07be",
|
| 913 |
+
"sha256:65e0a60894435608334d68c8811e55fd8f73e5bf8ee6f9ccedb0064486a7b418",
|
| 914 |
+
"sha256:6de1fd253e27e7f01f05cd7c37929ae521ca23ca4620cfc7c485299941679112",
|
| 915 |
+
"sha256:7f179373a047b947dec448243f4e6598a1c960fa3bb978a9a7eecd529fbc363f",
|
| 916 |
+
"sha256:83dcf518685db20912b71fc49cbddcc8849438cdb0e9dcc919b02a849e2cd9e8",
|
| 917 |
+
"sha256:9f3df8138a1126a851440b7d5a4869bfb7c9cc43563d64fd9d96d0465b581024",
|
| 918 |
+
"sha256:b81da3bdb58c9de29d0e1361e52f12fcf10a89673f17a11a5c6c7da1cb1a8376",
|
| 919 |
+
"sha256:ba135923295d564355326dc409b6b7f5bd6edc80f764cdaef1fb0a1b23ff2f9c",
|
| 920 |
+
"sha256:bc52d603d87fe1da24439c0d5fdbbb14e0ae4874451d53f0120ffb1f6c192727",
|
| 921 |
+
"sha256:c54db1fade17287aabbeed685d8e8ab3a56fea9dd8d46e71ced2da367f09a49f",
|
| 922 |
+
"sha256:ce4baeba9804da5a346e210b3b70826f5811330c343e4fe1582200359ee77fe5",
|
| 923 |
+
"sha256:ea718746469246cc63b3353afd75698a288344adb55e29b7f814a5d3c0a7c78d",
|
| 924 |
+
"sha256:f499212f1cffea5d587e5f06144630ed9aa9c399bba12ec8905798d833bd1404"
|
| 925 |
+
],
|
| 926 |
+
"index": "pypi",
|
| 927 |
+
"markers": "python_full_version >= '3.8.0'",
|
| 928 |
+
"version": "==2.5.0"
|
| 929 |
+
},
|
| 930 |
+
"tqdm": {
|
| 931 |
+
"hashes": [
|
| 932 |
+
"sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd",
|
| 933 |
+
"sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"
|
| 934 |
+
],
|
| 935 |
+
"markers": "python_version >= '3.7'",
|
| 936 |
+
"version": "==4.66.5"
|
| 937 |
+
},
|
| 938 |
+
"transformers": {
|
| 939 |
+
"hashes": [
|
| 940 |
+
"sha256:3a9e2eb537094db11c3652334d281afa4766c0e5091c4dcdb454e9921bb0d2b7",
|
| 941 |
+
"sha256:e161268ae8bee315eb9e9b4c0b27f1bd6980f91e0fc292d75249193d339704c0"
|
| 942 |
+
],
|
| 943 |
+
"index": "pypi",
|
| 944 |
+
"markers": "python_full_version >= '3.8.0'",
|
| 945 |
+
"version": "==4.46.0"
|
| 946 |
+
},
|
| 947 |
+
"typing-extensions": {
|
| 948 |
+
"hashes": [
|
| 949 |
+
"sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d",
|
| 950 |
+
"sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"
|
| 951 |
+
],
|
| 952 |
+
"markers": "python_version >= '3.8'",
|
| 953 |
+
"version": "==4.12.2"
|
| 954 |
+
},
|
| 955 |
+
"tzdata": {
|
| 956 |
+
"hashes": [
|
| 957 |
+
"sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc",
|
| 958 |
+
"sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"
|
| 959 |
+
],
|
| 960 |
+
"markers": "python_version >= '2'",
|
| 961 |
+
"version": "==2024.2"
|
| 962 |
+
},
|
| 963 |
+
"urllib3": {
|
| 964 |
+
"hashes": [
|
| 965 |
+
"sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac",
|
| 966 |
+
"sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"
|
| 967 |
+
],
|
| 968 |
+
"markers": "python_version >= '3.8'",
|
| 969 |
+
"version": "==2.2.3"
|
| 970 |
+
},
|
| 971 |
+
"zipp": {
|
| 972 |
+
"hashes": [
|
| 973 |
+
"sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350",
|
| 974 |
+
"sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"
|
| 975 |
+
],
|
| 976 |
+
"markers": "python_version >= '3.8'",
|
| 977 |
+
"version": "==3.20.2"
|
| 978 |
+
}
|
| 979 |
+
},
|
| 980 |
+
"develop": {}
|
| 981 |
+
}
|
README.md
CHANGED
|
@@ -2,4 +2,51 @@
|
|
| 2 |
license: mit
|
| 3 |
---
|
| 4 |
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
license: mit
|
| 3 |
---
|
| 4 |
|
| 5 |
+
# Erasing Concepts from Diffusion Models
|
| 6 |
+
### [Project Website](https://erasing.baulab.info) | [Arxiv Preprint](https://arxiv.org/pdf/2303.07345.pdf) | [Fine-tuned Weights](https://erasing.baulab.info/weights/esd_models/) | [Demo](https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion) <br>
|
| 7 |
+
|
| 8 |
+
<div align='center'>
|
| 9 |
+
<img src = 'images/applications.png'>
|
| 10 |
+
</div>
|
| 11 |
+
|
| 12 |
+
Motivated by recent advancements in text-to-image diffusion, we study erasure of specific concepts from the model's weights. While Stable Diffusion has shown promise in producing explicit or realistic artwork, it has raised concerns regarding its potential for misuse. We propose a fine-tuning method that can erase a visual concept from a pre-trained diffusion model, given only the name of the style and using negative guidance as a teacher. We benchmark our method against previous approaches that remove sexually explicit content and demonstrate its effectiveness, performing on par with Safe Latent Diffusion and censored training.
|
| 13 |
+
|
| 14 |
+
To evaluate artistic style removal, we conduct experiments erasing five modern artists from the network and conduct a user study to assess the human perception of the removed styles. Unlike previous methods, our approach can remove concepts from a diffusion model permanently rather than modifying the output at inference time, so it cannot be circumvented even if a user has access to the model weights.
|
| 15 |
+
|
| 16 |
+
Given only a short text description of an undesired visual concept and no additional data, our method fine-tunes model weights to erase the targeted concept. Our method can avoid NSFW content, stop imitation of a specific artist's style, or even erase a whole object class from model output, while preserving the model's behavior and capabilities on other topics.
|
| 17 |
+
|
| 18 |
+
## Fine-tuned Weights
|
| 19 |
+
|
| 20 |
+
The finetuned weights for both NSFW and art style erasures are available on our [project page](https://erasing.baulab.info).
|
| 21 |
+
|
| 22 |
+
## Running Gradio Demo Locally
|
| 23 |
+
|
| 24 |
+
To run the gradio interactive demo locally, clone the files from [demo repository](https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion/tree/main) <br>
|
| 25 |
+
|
| 26 |
+
* Create an environment using the packages included in the requirements.txt file
|
| 27 |
+
* Run `python app.py`
|
| 28 |
+
* Open the application in browser at `http://127.0.0.1:7860/`
|
| 29 |
+
* Train, evaluate, and save models using our method
|
| 30 |
+
|
| 31 |
+
## Installation Guide
|
| 32 |
+
|
| 33 |
+
* To get started clone the following repository of Original Stable Diffusion [Link](https://github.com/CompVis/stable-diffusion)
|
| 34 |
+
* Then download the files from our repository to `stable-diffusion` main directory of stable diffusion. This would replace the `ldm` folder of the original repo with our custom `ldm` directory
|
| 35 |
+
* Download the weights from [here](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4-full-ema.ckpt) and move them to `stable-diffusion/models/ldm/` (This will be `ckpt_path` variable in `train-scripts/train-esd.py`)
|
| 36 |
+
* [Only for training] To convert your trained models to diffusers download the diffusers Unet config from [here](https://huggingface.co/CompVis/stable-diffusion-v1-4/blob/main/unet/config.json) (This will be `diffusers_config_path` variable in `train-scripts/train-esd.py`)
|
| 37 |
+
|
| 38 |
+
## Training Guide
|
| 39 |
+
|
| 40 |
+
After installation, follow these instructions to train a custom ESD model:
|
| 41 |
+
|
| 42 |
+
* `cd stable-diffusion` to the main repository of stable-diffusion
|
| 43 |
+
* [IMPORTANT] Edit `train-scripts/train-esd.py` and change the default argparser values according to your convenience (especially the config paths)
|
| 44 |
+
* To choose train_method, pick from following `'xattn'`,`'noxattn'`, `'selfattn'`, `'full'`
|
| 45 |
+
* `python train-scripts/train-esd.py --prompt 'your prompt' --train_method 'your choice of training' --devices '0,1'`
|
| 46 |
+
|
| 47 |
+
Note that the default argparser values must be changed!
|
| 48 |
+
|
| 49 |
+
The optimization process for erasing undesired visual concepts from pre-trained diffusion model weights involves using a short text description of the concept as guidance. The ESD model is fine-tuned with the conditioned and unconditioned scores obtained from the frozen SD model to guide the output away from the concept being erased. The model learns from its own knowledge to steer the diffusion process away from the undesired concept.
|
| 50 |
+
<div align='center'>
|
| 51 |
+
<img src = 'images/ESD.png'>
|
| 52 |
+
</div>
|
esd-vangogh_from_vangogh-noxattn_1-epochs_200.pt → models/ESD-U/esd-vangogh_from_vangogh-noxattn_1-epochs_200.pt
RENAMED
|
File without changes
|
esd-picasso_from_picasso-xattn_1-epochs_200.pt → models/ESD-X/esd-picasso_from_picasso-xattn_1-epochs_200.pt
RENAMED
|
File without changes
|