Delete extensions-builtin/forge_preprocessor_revision
Browse files
extensions-builtin/forge_preprocessor_revision/scripts/preprocessor_revision.py
DELETED
|
@@ -1,103 +0,0 @@
|
|
| 1 |
-
import torch
|
| 2 |
-
import copy
|
| 3 |
-
|
| 4 |
-
from modules_forge.supported_preprocessor import PreprocessorClipVision, PreprocessorParameter
|
| 5 |
-
from modules_forge.shared import add_supported_preprocessor
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
def revision_conditioning_modifier(model, x, timestep, uncond, cond, cond_scale, model_options, seed):
    """Inject Revision (CLIP-G image embed) conditioning into SDXL's ADM vector.

    Reads the ``'revision_conditions'`` list that
    ``PreprocessorClipVisionForRevision.process_before_every_sampling`` stashed in
    ``model_options``, noise-augments each image embedding, optionally merges
    several of them, and overwrites the first 1280 channels of the ``'y'``
    conditioning of both ``cond`` and ``uncond``.

    Parameters follow the Forge conditioning-modifier protocol; the function
    returns the same eight values (possibly with ``cond``/``uncond`` replaced
    by modified deep copies) in the order they were received.
    """
    revision_conditions = model_options['revision_conditions']

    # Robustness: nothing to do if no Revision units contributed a condition.
    # (Without this guard, adm_out below would be unbound -> NameError.)
    if not revision_conditions:
        return model, x, timestep, uncond, cond, cond_scale, model_options, seed

    noise_augmentor = model.noise_augmentor
    noise_augment_merge = 0.0  # noise level re-applied after summing multiple embeds
    ignore_prompt = False

    adm_inputs = []
    noise_aug = []
    for revision_condition in revision_conditions:
        adm_cond = revision_condition['cond'].image_embeds
        weight = revision_condition["weight"]
        noise_augment = revision_condition["noise_aug"]
        # Map the [0, 1] slider value onto the augmentor's discrete noise levels.
        noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
        c_adm, noise_level_emb = noise_augmentor(adm_cond.to(x.device),
                                                 noise_level=torch.tensor([noise_level], device=x.device), seed=seed)
        adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight
        noise_aug.append(noise_augment)
        adm_inputs.append(adm_out)
        if revision_condition["ignore_prompt"]:
            ignore_prompt = True

    if len(noise_aug) > 1:
        # Multiple Revision images: sum the weighted embeds, then noise-augment
        # the merged embedding once more (at noise_augment_merge level).
        adm_out = torch.stack(adm_inputs).sum(0)
        noise_augment = noise_augment_merge
        noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
        c_adm, noise_level_emb = noise_augmentor(adm_out[:, :noise_augmentor.time_embed.dim],
                                                 noise_level=torch.tensor([noise_level], device=x.device))
        adm_out = torch.cat((c_adm, noise_level_emb), 1)

    new_y = adm_out[:, :1280]
    # Deep-copy so the caller's cached conditioning objects stay untouched.
    cond = copy.deepcopy(cond)
    uncond = copy.deepcopy(uncond)

    for c in cond:
        c['model_conds']['y'].cond[:, :1280] = new_y.clone()

    for c in uncond:
        # Unconditional side gets a zeroed image embed of the same shape.
        c['model_conds']['y'].cond[:, :1280] = torch.zeros_like(new_y)

    if ignore_prompt:
        # Any unit flagged ignore_prompt blanks the text cross-attention for all.
        for c in cond + uncond:
            c['model_conds']['c_crossattn'].cond = torch.zeros_like(c['model_conds']['c_crossattn'].cond)

    return model, x, timestep, uncond, cond, cond_scale, model_options, seed
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
class PreprocessorClipVisionForRevision(PreprocessorClipVision):
    """CLIP-G vision preprocessor that wires SDXL Revision conditioning into sampling.

    Each enabled unit records its image embedding (plus weight / noise-aug /
    ignore-prompt settings) on a cloned UNet patcher and registers
    ``revision_conditioning_modifier`` to apply them at sampling time.
    """

    def __init__(self, name, url, filename, ignore_prompt=False):
        super().__init__(name, url, filename)
        self.ignore_prompt = ignore_prompt
        self.do_not_need_model = True
        self.tags = ['Revision']
        self.model_filename_filters = ['Revision']
        # Single extra UI control: how much noise to mix into the image embed.
        self.slider_1 = PreprocessorParameter(
            label="Noise Augmentation", minimum=0.0, maximum=1.0, value=0.0, visible=True)

    def process_before_every_sampling(self, process, cond, mask, *args, **kwargs):
        """Append this unit's Revision condition to the UNet and return cond/mask unchanged."""
        control_unit = kwargs['unit']

        patched_unet = process.sd_model.forge_objects.unet.clone()

        # Accumulate conditions from every active Revision unit on the clone.
        conditions = patched_unet.model_options.setdefault('revision_conditions', [])
        conditions.append({
            'cond': cond,
            'weight': float(control_unit.weight),
            'noise_aug': float(control_unit.threshold_a),
            'ignore_prompt': self.ignore_prompt,
        })

        # ensure_uniqueness: the modifier is registered once no matter how many units run.
        patched_unet.add_conditioning_modifier(revision_conditioning_modifier, ensure_uniqueness=True)

        process.sd_model.forge_objects.unet = patched_unet

        return cond, mask
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
# Register both Revision variants; they differ only in whether the text
# prompt is blanked out during sampling.
for _revision_name, _skip_prompt in (
        ('CLIP-G (Revision)', False),
        ('CLIP-G (Revision ignore prompt)', True)):
    add_supported_preprocessor(PreprocessorClipVisionForRevision(
        name=_revision_name,
        url='https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors',
        filename='CLIP-ViT-bigG.safetensors',
        ignore_prompt=_skip_prompt
    ))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|