| elf.all_negative_prompts] | |
| self.main_prompt = self.all_prompts[0] | |
| self.main_negative_prompt = self.all_negative_prompts[0] | |
    def cached_params(self, required_prompts, steps, extra_network_data, hires_steps=None, use_old_scheduling=False):
        """Returns parameters that invalidate the cond cache if changed

        The returned tuple is compared by get_conds_with_caching against the
        previously stored key: every value that can change the computed
        conditioning must be included here, otherwise a stale result would be
        reused.
        """
        return (
            required_prompts,
            steps,
            hires_steps,
            use_old_scheduling,
            opts.CLIP_stop_at_last_layers,  # clip-skip changes the text-encoder output
            shared.sd_model.sd_checkpoint_info,  # conds are specific to the loaded checkpoint
            extra_network_data,
            opts.sdxl_crop_left,  # NOTE(review): presumably SDXL crop conditioning inputs — verify against the model's cond builder
            opts.sdxl_crop_top,
            self.width,
            self.height,
        )
    def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data, hires_steps=None):
        """
        Returns the result of calling function(shared.sd_model, required_prompts, steps)
        using a cache to store the result if the same arguments have been used before.

        cache is an array containing two elements. The first element is a tuple
        representing the previously used arguments, or None if no arguments
        have been used before. The second element is where the previously
        computed result is stored.

        caches is a list with items described above.
        """
        if shared.opts.use_old_scheduling:
            # When the legacy prompt-editing scheduler is enabled, record in the
            # generation parameters whether it actually produces a different
            # schedule than the new one, so the image can be reproduced later.
            old_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, False)
            new_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, True)
            if old_schedules != new_schedules:
                self.extra_generation_params["Old prompt editing timelines"] = True

        cached_params = self.cached_params(required_prompts, steps, extra_network_data, hires_steps, shared.opts.use_old_scheduling)

        # Reuse any cache whose stored key matches the current parameters.
        for cache in caches:
            if cache[0] is not None and cached_params == cache[0]:
                return cache[1]

        # Miss: recompute into the first cache slot.
        cache = caches[0]

        with devices.autocast():
            cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)

        # Store the key only after the computation succeeds, so an exception in
        # `function` cannot leave a fresh key paired with a stale result.
        cache[0] = cached_params

        return cache[1]
| def setup_conds(self): | |
| prompts = prompt_parser.SdConditioning(self.prompts, width=self.width, height=self.height) | |
| negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True) | |
| sampler_config = sd_samplers.find_sampler_config(self.sampler_name) | |
| total_steps = sampler_config.total_steps(self.steps) if sampler_config else self.steps | |
| self.step_multiplier = total_steps // self.steps | |
| self.firstpass_steps = total_steps | |
| self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data) | |
| self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data) | |
| def get_conds(self): | |
| return self.c, self.uc | |
| def parse_extra_network_prompts(self): | |
| self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts) | |
| def save_samples(self) -> bool: | |
| """Returns whether generated images need to be written to disk""" | |
| return opts.samples_save and not self.do_not_save_samples and (opts.save_incomplete_images or not state.interrupted and not state.skipped) | |
| class Processed: | |
| def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): | |
| self.images = images_list | |
| self.prompt = p.prompt | |
| self.negative_prompt = p.negative_prompt | |
| self.seed = seed | |
| self.subseed = subseed | |
| self.subseed_strength = p.subseed_strength | |
| self.info = info | |
| self.comments = "".join(f"{comment}\n" for comment in p.comments) | |
| self.width = p.width | |
| self.height = p.height | |
| self.sampler_name = p.sampler_name | |
| self.cfg_scale = p.cfg_scale | |
| self.image_cfg_scale = getattr(p, 'image_cfg_scale', None) | |
| self.steps = p.steps | |
| self.batch_size = p.batch_size | |
| self.restore_faces = p.restore_faces | |
| self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None | |
| self.sd_model_name = p.sd_model_name | |
| self.sd_model_hash = p.sd_model_hash | |
| self.sd_vae_name = p.sd_vae_name | |
| self.sd_vae_hash = p.sd_vae_hash | |
| self.seed_resize_from_w = p.seed_resize_from_w | |
| self.seed_resize_from_h = p.seed_resize_from_h | |
| self.den |